# Display settings: suppress scientific notation, show 4 significant digits.
options(scipen = 999, digits = 4)
# knitr: prefix echoed chunk output with "#".
knitr::opts_chunk$set(comment = "#")

# Select a CRAN mirror explicitly so install.packages() can run
# non-interactively.
repos <- getOption("repos")
repos["CRAN"] <- "http://cran.us.r-project.org"

options(repos = repos)

## Load the requested packages, first installing any that are missing.
## Returns (via sapply over require) a named logical vector reporting
## whether each package attached successfully.
ipak <- function(pkg) {
  not.installed <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(not.installed) > 0) {
    install.packages(not.installed, dependencies = TRUE)
  }
  sapply(pkg, require, character.only = TRUE)
}

# All packages used in this analysis. The original vector listed "dplyr"
# twice (visible in the ipak() output below); the duplicate is removed.
packages <- c("tidyverse", "Hmisc", "lattice", "multcomp", "lsmeans",
              "schoRsch", "influence.ME", "lme4", "effects", "lmerTest",
              "cowplot", "irr", "simr", "plyr", "dplyr", "patchwork",
              "wesanderson", "MuMIn", "devtools", "ggResidpanel", "HLMdiag",
              "mixed", "sjPlot", "effectsize")

# Install (where needed) and attach everything; the logical vector printed
# below reports per-package attach success ('mixed' is unavailable here).
ipak(packages)
## Warning: package 'mixed' is not available (for R version 4.0.2)
## Warning: replacing previous import 'lifecycle::last_warnings' by
## 'rlang::last_warnings' when loading 'tibble'
## Warning: replacing previous import 'lifecycle::last_warnings' by
## 'rlang::last_warnings' when loading 'pillar'
## Warning: replacing previous import 'vctrs::data_frame' by 'tibble::data_frame'
## when loading 'dplyr'
## Warning in library(package, lib.loc = lib.loc, character.only = TRUE,
## logical.return = TRUE, : there is no package called 'mixed'
##    tidyverse        Hmisc      lattice     multcomp      lsmeans     schoRsch 
##         TRUE         TRUE         TRUE         TRUE         TRUE         TRUE 
## influence.ME         lme4      effects     lmerTest      cowplot          irr 
##         TRUE         TRUE         TRUE         TRUE         TRUE         TRUE 
##         simr         plyr        dplyr    patchwork  wesanderson        MuMIn 
##         TRUE         TRUE         TRUE         TRUE         TRUE         TRUE 
##     devtools        dplyr ggResidpanel      HLMdiag        mixed       sjPlot 
##         TRUE         TRUE         TRUE         TRUE        FALSE         TRUE 
##   effectsize 
##         TRUE
# Use summed (deviation) contrasts for unordered factors and polynomial
# contrasts for ordered factors in all subsequent models.
options(contrasts = c("contr.sum", "contr.poly")) 

# Re-attach dplyr so its verbs (filter, select, ...) mask plyr's versions
# (plyr was attached by ipak() above). NOTE(review): the detach() below
# fails with the warning shown because other loaded packages import
# dplyr's namespace; only the subsequent library("dplyr") has an effect.
detach("package:dplyr", unload = TRUE)
## Warning: 'dplyr' namespace cannot be unloaded:
##   namespace 'dplyr' is imported by 'tidyr', 'sjPlot', 'plotly', 'broom', 'janitor', 'dbplyr', 'sjmisc', 'sjstats', 'qqplotr', 'HLMdiag' so cannot be unloaded
library("dplyr")
# Individual-infant looking data (one row per infant). The superseded
# scoped verbs mutate_if()/mutate_at() are replaced with across() and a
# plain mutate(); the result is unchanged.
ind.data.pre <- read.csv("./ma_study_data.csv", header = TRUE) %>%
  # first character of MA_condition encodes the experiment number
  mutate(experiment = str_sub(MA_condition, 1, 1)) %>%
  mutate(across(where(is.character), as.factor)) %>%
  mutate(subj = as.factor(subj),
         condition = MA_condition) %>%
  # lower-case condition labels so they match the design spreadsheet
  mutate(condition = tolower(condition))

# Study-design spreadsheet: one row per condition, with design moderators.
# mutate_if()/mutate_at() (superseded) replaced by across()/plain mutate().
ma.data <- read.csv("./ma_study_designs.csv") %>%
  rename(paper = study_ID,
         condition = expt_condition,
         experiment = expt_num,
         task = looking_task) %>%
  mutate(across(where(is.character), as.factor)) %>%
  # lower-case condition labels so they match the individual-level data
  mutate(condition = tolower(condition))

# Design moderators only, excluding one paper and the "causes" task.
# The original select() listed action_consequence twice (dplyr silently
# de-duplicates); the duplicate is removed. mutate_if() replaced by across().
study.design <- ma.data %>%
  select(paper, experiment, condition, task, training_yesno,
         action_consequence, actor_hand, agent_efficient_fam,
         object_diff_size_huge, action_causal,
         location_object_goal_ambiguous, bothobjects_present_visible_fam,
         agent, PI_group) %>%
  mutate(across(where(is.character), as.factor)) %>%
  mutate(experiment = as.factor(experiment)) %>%
  filter(paper != "sommerville2005",
         task != "causes")

# One row per paper with its full citation, rendered as a table.
# unique() (rather than distinct()) is deliberate here: it preserves the
# original row names shown in the knitted table below.
ma.data %>% 
  select(paper, long_cite) %>%
  unique() %>%
  knitr::kable()
paper long_cite
1 choi_unpub Choi, Y.J. & Luo, Y. (unpublished). Three-month-old infants’ understanding of a human agent’s preference
4 choi2018 Choi, Y. J., Mou, Y., & Luo, Y. (2018). How do 3-month-old infants attribute preferences to a human agent?. Journal of experimental child psychology, 172, 96-106.
7 gerson2014a Gerson, S. A., & Woodward, A. L. (2014a). The joint role of trained, untrained, and observed actions at the origins of goal recognition. Infant Behavior and Development, 37(1), 94-104.
10 gerson2014b Gerson, S. A., & Woodward, A. L. (2014b). Learning from their own actions: the unique effect of producing actions on infants’ action understanding. Child Development, 85(1), 264–277.
13 woo_unpub Woo, B. M., Liu, S., & Spelke, E. S. (2021). Open-minded, not naïve: Three-month-old infants encode objects as the goals of other people’s reaches. Proceedings of the 43rd Annual Meeting of the Cognitive Science Society.
14 woo_unpub Woo, Liu & Spelke (unpublished). Three-month-olds’ understanding of object-directed reaches that make objects change state
16 liu2019 Liu, S., Brooks, N. B., & Spelke, E. S. (2019). Origins of the concepts cause, cost, and goal in prereaching infants. Proceedings of the National Academy of Sciences, 116(36), 17747-17752.
23 luo2011 Luo, Y. (2011). Three‐month‐old infants attribute goals to a non‐human agent. Developmental science, 14(2), 453-460.
26 skerry2013 Skerry, A. E., Carey, S. E., & Spelke, E. S. (2013). First-person action experience reveals sensitivity to action efficiency in prereaching infants. Proceedings of the National Academy of Sciences, 110(46), 18728-18733.
31 sommerville2005 Sommerville, J. A., Woodward, A. L., & Needham, A. (2005). Action experience alters 3-month-old infants’ perception of others’ actions. Cognition, 96(1), 1–11.
34 krogh2009 Krogh, L. (2009). The effect of action on causal perception in 3- and 4.5-month-old infants (Undergraduate thesis, Carnegie Mellon University). Retrieved from http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.467.8278&rep=rep1&type=pdf
37 desrochers1999 Desrochers, S. (1999). Infants’ processing of causal and noncausal events at 3.5 months of age. The Journal of Genetic Psychology, 160(3), 294–302.
# Merge study design with individual looks from babies; compute the
# looking-preference difference score and harmonize labels.
# mutate_if() (superseded) replaced by across(); result unchanged.
ind.data <- left_join(ind.data.pre, study.design, 
                      by = c("paper", "experiment", "condition")) %>%
  # sex coded as first character, lower-cased ("f"/"m")
  mutate(sex = tolower(str_sub(sex, 1, 1)),
         look_pref = unexp_look - exp_look) %>%
  mutate(paper = str_replace_all(paper, "unpublished", "unpub")) %>%
  mutate(task = str_replace_all(task, "efficiency", "constraints")) %>%
  mutate(across(where(is.character), as.factor))

# Inspect the merged data set: 650 infants, 21 variables.
str(ind.data)
# 'data.frame': 650 obs. of  21 variables:
#  $ subj                           : Factor w/ 650 levels "1","2","3","4",..: 1 2 3 4 5 6 7 8 9 10 ...
#  $ paper                          : Factor w/ 8 levels "choi_unpub","choi2018",..: 6 6 6 6 6 6 6 6 6 6 ...
#  $ MA_condition                   : Factor w/ 26 levels "1_Active","1_Control",..: 12 12 12 12 12 12 12 12 12 12 ...
#  $ ageday                         : num  103.3 86.9 105.3 129.8 107.3 ...
#  $ exp_look                       : num  13.1 18.4 15.1 7.1 5.8 9 18.9 6 31 42.9 ...
#  $ unexp_look                     : num  60 60 46.3 13.6 12 60 8 21 60 42.7 ...
#  $ sex                            : Factor w/ 2 levels "f","m": 2 2 2 1 1 1 2 2 2 1 ...
#  $ experiment                     : Factor w/ 6 levels "1","2","3","4",..: 1 1 1 1 1 1 1 1 1 1 ...
#  $ condition                      : Factor w/ 26 levels "1_active","1_control",..: 12 12 12 12 12 12 12 12 12 12 ...
#  $ task                           : Factor w/ 2 levels "constraints",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ training_yesno                 : Factor w/ 2 levels "no","yes": 1 1 1 1 1 1 1 1 1 1 ...
#  $ action_consequence             : Factor w/ 3 levels "location_change",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ actor_hand                     : Factor w/ 3 levels "bare","gloved",..: NA NA NA NA NA NA NA NA NA NA ...
#  $ agent_efficient_fam            : Factor w/ 2 levels "no","yes": NA NA NA NA NA NA NA NA NA NA ...
#  $ object_diff_size_huge          : Factor w/ 2 levels "no","yes": 1 1 1 1 1 1 1 1 1 1 ...
#  $ action_causal                  : Factor w/ 2 levels "no","yes": 1 1 1 1 1 1 1 1 1 1 ...
#  $ location_object_goal_ambiguous : Factor w/ 2 levels "no","yes": 2 2 2 2 2 2 2 2 2 2 ...
#  $ bothobjects_present_visible_fam: Factor w/ 3 levels "","no","yes": 3 3 3 3 3 3 3 3 3 3 ...
#  $ agent                          : Factor w/ 3 levels "hand","object",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ PI_group                       : Factor w/ 5 levels "desrochers","luo",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ look_pref                      : num  46.9 41.6 31.2 6.5 6.2 ...
# View(ind.data)
# Fixed-effect estimates of a fitted (g)lmer model as a one-column data
# frame; the column is named "beta" and row names are the model terms.
gen.beta <- function(model) {
  setNames(data.frame(fixef(model)), "beta")
}

# Confidence intervals for a fitted model as a data frame with columns
# "lower" and "upper" (one row per parameter, row names preserved).
gen.ci <- function(model) {
  setNames(data.frame(confint(model)), c("lower", "upper"))
}

# Coefficient table of an lmerTest model summary as a data frame with
# columns b (estimate), se, df (Satterthwaite), t, and p.
gen.m <- function(model) {
  setNames(data.frame(coef(summary(model))), c("b", "se", "df", "t", "p"))
}

# Coefficient table of an lm() summary as a data frame with columns
# b (estimate), se, t, and p.
gen.lm <- function(model) {
  setNames(data.frame(coef(summary(model))), c("b", "se", "t", "p"))
}

# Age summary (mean / min / max of ageday, in days) plus number of female
# infants. NOTE(review): sum(sex == "f") is divided by 2, presumably
# because each infant contributes two rows in long-format data — confirm
# before applying to wide (one-row-per-infant) data.
ages <- function(longdata) {
  longdata %>%
    summarize(mean = mean(ageday),
              min = min(ageday),
              max = max(ageday),
              f = sum(sex == "f") / 2)
}

# Format one row of a combined model-results table into a printable string
# like "[lower,upper], ß=…, B=…, SE=…, p=…, two-tailed".
# Expects `table` to have columns beta, b, se, p, lower, upper — i.e. the
# cbind() of gen.beta(), gen.m(), and gen.ci() output used in this script.
#   index:  which row (model term) to report
#   places: decimal places used for rounding throughout
#   tails:  the string "1" requests a one-tailed p (two-tailed p halved);
#           any other value reports the p as-is, two-tailed
#   flip:   if supplied (any value), the signs of the CI bounds and of the
#           standardized/unstandardized estimates are negated, to report
#           the effect in the opposite direction
# NOTE(review): p is rounded to `places` decimals BEFORE the halving and
# before the `< .001` check, so a very small p can print as "=0" instead
# of "<.001" when `places` is small — confirm this ordering is intended.
report <- function(table, index, places, tails, flip) {
  if (tails == "1") {
    p <- round(table$p[index], places)/2
    howmanytails <- "one-tailed"
  } else {
    p <- round(table$p[index], places)
    howmanytails <- "two-tailed"
  }
  if (p < .001) {
    p <- "<.001"
  } else {
    p <- paste("=", round(p, places), sep = "")
  }
  if (missing(flip)) {
    result <- paste("[", round(table$lower[index], places), ",", round(table$upper[index], places), "], ß=", round(table$beta[index], places), ", B=", round(table$b[index],places), ", SE=", round(table$se[index],places), ", p", p, ", ", howmanytails, sep = "")
  } else {
    # flipped: the negated upper bound becomes the lower bound and vice versa
    result <- paste("[", -round(table$upper[index], places), ",", -round(table$lower[index], places), "], ß=", -round(table$beta[index], places), ", B=", -round(table$b[index],places), ", SE=", round(table$se[index],places), ", p", p, ", ", howmanytails, sep = "")
  }
  return(result)
}

# Standard error of the mean: sd / sqrt(n). With na.rm = TRUE, NAs are
# dropped before both the sd() and the length() used as n.
se <- function(x, na.rm = FALSE) {
  if (na.rm) {
    x <- na.omit(x)
  }
  sd(x) / sqrt(length(x))
}

# One-line text summary of the mean looking preference (and its SE) in a
# data set that has a look_pref column.
describe <- function(dataset) {
  stats <- dataset %>%
    summarise(lookdiff_avg = mean(look_pref, na.rm = TRUE),
              lookdiff_SE = se(look_pref, na.rm = TRUE),
              n = n())
  paste("Mean looking preference = ", round(stats$lookdiff_avg, 3),
        "seconds, ",
        "standard error (SE) = ", round(stats$lookdiff_SE, 3))
}
## Retrieved from : http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/#error-bars-for-within-subjects-variables
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
##   data: a data frame.
##   measurevar: the name of a column that contains the variable to be summariezed
##   groupvars: a vector containing names of columns that contain grouping variables
##   na.rm: a boolean that indicates whether to ignore NA's
##   conf.interval: the percent range of the confidence interval (default is 95%)
# Summarize `measurevar` within the groups defined by `groupvars`:
# returns a data frame with N, mean (named after measurevar), sd, se, ci.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  # NOTE(review): library() inside a function attaches plyr for the whole
  # session as a side effect; kept as-is from the cookbook-r original.
  library(plyr)
  
  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else       length(x)
  }
  
  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N    = length2(xx[[col]], na.rm=na.rm),
                     mean = mean   (xx[[col]], na.rm=na.rm),
                     sd   = sd     (xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  
  # Rename the "mean" column    
  datac <- plyr::rename(datac, c("mean" = measurevar))
  
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  
  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval: 
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  
  return(datac)
}
## Norms the data within specified groups in a data frame; it normalizes each
## subject (identified by idvar) so that they have the same mean, within each group
## specified by betweenvars.
##   data: a data frame.
##   idvar: the name of a column that identifies each subject (or matched subjects)
##   measurevar: the name of a column that contains the variable to be summariezed
##   betweenvars: a vector containing names of columns that are between-subjects variables
##   na.rm: a boolean that indicates whether to ignore NA's
# Norm `measurevar` within subjects (idvar) so every subject has the same
# mean inside each between-subjects cell; adds a "<measurevar>_norm"
# column and returns the data. (cookbook-r original, kept verbatim.)
normDataWithin <- function(data=NULL, idvar, measurevar, betweenvars=NULL,
                           na.rm=FALSE, .drop=TRUE) {
  # NOTE(review): attaches plyr for the session as a side effect.
  library(plyr)
  
  # Measure var on left, idvar + between vars on right of formula.
  data.subjMean <- ddply(data, c(idvar, betweenvars), .drop=.drop,
                         .fun = function(xx, col, na.rm) {
                           c(subjMean = mean(xx[,col], na.rm=na.rm))
                         },
                         measurevar,
                         na.rm
  )
  
  # Put the subject means with original data
  data <- merge(data, data.subjMean)
  
  # Get the normalized data in a new column
  measureNormedVar <- paste(measurevar, "_norm", sep="")
  data[,measureNormedVar] <- data[,measurevar] - data[,"subjMean"] +
    mean(data[,measurevar], na.rm=na.rm)
  
  # Remove this subject mean column
  data$subjMean <- NULL
  
  return(data)
}

## Summarizes data, handling within-subjects variables by removing inter-subject variability.
## It will still work if there are no within-S variables.
## Gives count, un-normed mean, normed mean (with same between-group mean),
##   standard deviation, standard error of the mean, and confidence interval.
## If there are within-subject variables, calculate adjusted values using method from Morey (2008).
##   data: a data frame.
##   measurevar: the name of a column that contains the variable to be summariezed
##   betweenvars: a vector containing names of columns that are between-subjects variables
##   withinvars: a vector containing names of columns that are within-subjects variables
##   idvar: the name of a column that identifies each subject (or matched subjects)
##   na.rm: a boolean that indicates whether to ignore NA's
##   conf.interval: the percent range of the confidence interval (default is 95%)
# Within-subjects summary of `measurevar`: un-normed means combined with
# sd/se/ci computed on subject-normed data, Morey (2008)-corrected.
# Returns one row per between x within cell. (cookbook-r original.)
summarySEwithin <- function(data=NULL, measurevar, betweenvars=NULL, withinvars=NULL,
                            idvar=NULL, na.rm=FALSE, conf.interval=.95, .drop=TRUE) {
  
  # Ensure that the betweenvars and withinvars are factors
  factorvars <- vapply(data[, c(betweenvars, withinvars), drop=FALSE],
                       FUN=is.factor, FUN.VALUE=logical(1))
  
  if (!all(factorvars)) {
    nonfactorvars <- names(factorvars)[!factorvars]
    message("Automatically converting the following non-factors to factors: ",
            paste(nonfactorvars, collapse = ", "))
    data[nonfactorvars] <- lapply(data[nonfactorvars], factor)
  }
  
  # Get the means from the un-normed data
  datac <- summarySE(data, measurevar, groupvars=c(betweenvars, withinvars),
                     na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
  
  # Drop all the unused columns (these will be calculated with normed data)
  datac$sd <- NULL
  datac$se <- NULL
  datac$ci <- NULL
  
  # Norm each subject's data
  # NOTE(review): na.rm is passed positionally here; it lands on
  # normDataWithin's na.rm argument as intended.
  ndata <- normDataWithin(data, idvar, measurevar, betweenvars, na.rm, .drop=.drop)
  
  # This is the name of the new column
  measurevar_n <- paste(measurevar, "_norm", sep="")
  
  # Collapse the normed data - now we can treat between and within vars the same
  ndatac <- summarySE(ndata, measurevar_n, groupvars=c(betweenvars, withinvars),
                      na.rm=na.rm, conf.interval=conf.interval, .drop=.drop)
  
  # Apply correction from Morey (2008) to the standard error and confidence interval
  #  Get the product of the number of conditions of within-S variables
  nWithinGroups    <- prod(vapply(ndatac[,withinvars, drop=FALSE], FUN=nlevels,
                                  FUN.VALUE=numeric(1)))
  correctionFactor <- sqrt( nWithinGroups / (nWithinGroups-1) )
  
  # Apply the correction factor
  ndatac$sd <- ndatac$sd * correctionFactor
  ndatac$se <- ndatac$se * correctionFactor
  ndatac$ci <- ndatac$ci * correctionFactor
  
  # Combine the un-normed means with the normed results
  merge(datac, ndatac)
}
# Long format: one row per infant x trial type (expected / unexpected),
# with the looking time in `looking_time`.
ind.data.long <- ind.data %>%
  rename(unexpected = unexp_look,
         expected = exp_look) %>%
  pivot_longer(c(expected, unexpected),
               names_to = "trial_type",
               values_to = "looking_time")

# Within-subject summary (Morey-corrected SE/CI) of looking time per
# task x paper x experiment x condition x trial-type cell.
ind.data.summary <- summarySEwithin(data = ind.data.long, measurevar = "looking_time", betweenvars = c("task", "paper", "experiment", "condition"), withinvars = "trial_type")
# Automatically converting the following non-factors to factors: trial_type
# Between-subject summary of the looking-preference difference score.
ind.pref.summary <- summarySE(data = ind.data, measurevar = "look_pref", groupvars = c("task", "paper", "experiment", "condition"))

# NOTE(review): a stray, body-less copy of the summarySE() signature stood
# here ("summarySE <- function(data=NULL, ...)"). Because it had no body,
# R parsed the n_infants assignment below as that function's body — so
# summarySE was silently clobbered with a broken function AND n_infants
# was never created. The stray header has been removed.

# Total number of infants across all studies (one row per study cell).
n_infants <- ind.data.summary %>%
  filter(trial_type == "expected") %>% # just take one row per study
  summarise(sum(N))

# Number of distinct condition cells.
n_conditions <- ind.data.summary %>%
  filter(trial_type == "expected") %>% # just take one row per study
  nrow()

# Number of distinct papers contributing data.
n_papers <- ind.data.summary %>%
  filter(trial_type == "expected") %>% # just take one row per study
  group_by(paper) %>%
  count(paper) %>%
  nrow()

Summary of data sources

We searched for all journal papers, theses, and conference proceedings that reported looking time data from typically developing infants between 2 and 4 months of age in a task structured like the goals or constraints task. We also emailed two listservs (Cognitive Development Society, and Infant Studies) to request more datasets. This resulted in a final list of 11 papers and 35 conditions. We contacted the authors and asked them to send us the original datasets from this past published and unpublished work. We were able to gather original datasets from 8 papers, 30 conditions, and 650 infants (age range 72–137 days). A team of researchers were then randomly assigned to look through relevant papers including supplemental materials (with SL double-checking every entry and resolving disagreements). When exact values were not provided, or when we came across other ambiguities (e.g., numbers reported in the paper differing from the numbers calculated using the raw data), the team contacted authors to try to resolve them, and also used the tool WebPlotDigitizer (https://automeris.io/WebPlotDigitizer/) to extract estimated values from figures. If there were discrepancies between the paper, figures, and/or raw data, we prioritized the values from the raw data if available, then author correspondence, then the paper, then estimates from figures, in that order. For individual datasets, authors were asked to provide their data and a codebook, and were asked for permission to share their stimuli and data publicly on OSF. Of the 8 papers (30 conditions), data from xx conditions and stimuli from xx conditions (either actual study videos, or example stimuli) are publicly available at https://osf.io/zwncg/.

# Per-condition descriptives for the goals task: n and the floor of the
# observed age range, alongside the design moderators.
table.goals <- ind.data %>%
  filter(task == "goals") %>%
  group_by(paper, condition, training_yesno, action_causal,
           action_consequence, location_object_goal_ambiguous,
           bothobjects_present_visible_fam, agent) %>%
  summarise(n = n(),
            agemin = floor(min(ageday)),
            agemax = floor(max(ageday)),
            # "drop_last" is summarise()'s default; making it explicit
            # silences the regrouping message without changing the result.
            .groups = "drop_last") %>%
  select(paper, condition, n, agemin, agemax, training_yesno, action_causal,
         action_consequence, location_object_goal_ambiguous,
         bothobjects_present_visible_fam, agent)
# Per-condition descriptives for the constraints task: n and the floor of
# the observed age range, alongside the design moderators.
table.constraints <- ind.data %>%
  filter(task == "constraints") %>%
  group_by(paper, condition, training_yesno, action_causal,
           action_consequence, agent_efficient_fam, actor_hand) %>%
  summarise(n = n(),
            agemin = floor(min(ageday)),
            agemax = floor(max(ageday)),
            # "drop_last" is summarise()'s default; making it explicit
            # silences the regrouping message without changing the result.
            .groups = "drop_last") %>%
  select(paper, condition, n, agemin, agemax, training_yesno, action_causal,
         action_consequence, agent_efficient_fam, actor_hand)
# Render the goals-task condition table.
knitr::kable(table.goals)
paper condition n agemin agemax training_yesno action_causal action_consequence location_object_goal_ambiguous bothobjects_present_visible_fam agent
choi_unpub 1_equidistant 24 76 137 no no none yes yes person
choi_unpub 1_fartarget 24 72 130 no no none yes no person
choi_unpub 1_neartarget 24 74 127 no no none yes yes person
choi2018 1_hidden 16 75 121 no no none yes yes person
choi2018 1_oneobject 16 75 127 no no none yes yes person
choi2018 1_twoobject 16 77 130 no no none yes yes person
gerson2014a 1_active 24 91 121 yes no none yes yes hand
gerson2014a 1_control 24 91 120 no no none yes yes hand
gerson2014a 1_observation 24 91 121 no no none yes yes hand
gerson2014b 1_active 30 91 125 yes no none yes yes hand
gerson2014b 1_observation 30 92 125 no no none yes yes hand
gerson2014b 2_generalization 30 94 125 yes no none yes yes hand
luo2011 1_oneobject 12 76 124 no no none yes no object
luo2011 1_twoobject 12 79 129 no no none yes yes object
luo2011 2_differentpositions 12 81 118 no no none no no object
woo_unpub 1_statechange_objectswap 20 93 120 no yes state_change yes yes person
woo_unpub 2_disambiguatingobjectgoal 24 91 121 no yes state_change no yes person
woo_unpub 3_disambiguatinglocationgoal 24 90 121 no yes state_change no yes person
# Render the constraints-task condition table.
knitr::kable(table.constraints)
paper condition n agemin agemax training_yesno action_causal action_consequence agent_efficient_fam actor_hand
liu2019 1_pickupglove 20 92 122 no yes location_change yes gloved
liu2019 2_pickupbarehand 20 93 120 no yes location_change yes bare
liu2019 3_statechangebarrier 20 91 122 no yes state_change yes gloved
liu2019 3_statechangenobarrier 20 91 122 no yes state_change no gloved
liu2019 4_statechangenotcausal 20 93 121 no no state_change yes gloved
liu2019 5_statechangecausal 26 92 121 no yes state_change yes gloved
liu2019 5_statechangenotcausal 26 93 120 no no state_change yes gloved
skerry2013 1_effectiveactiontraining 20 93 121 yes yes location_change yes mittened
skerry2013 2_ineffectiveactiontraining 20 93 122 no yes location_change yes mittened
skerry2013 3_notraining 20 91 121 no yes location_change yes mittened
skerry2013 4_constrainedactionhabituation 26 93 120 yes yes location_change yes mittened
skerry2013 5_unconstrainedactionhabituation 26 93 122 yes yes location_change no mittened
# Re-inspect the merged data set before modelling (same structure as above).
str(ind.data)
# 'data.frame': 650 obs. of  21 variables:
#  $ subj                           : Factor w/ 650 levels "1","2","3","4",..: 1 2 3 4 5 6 7 8 9 10 ...
#  $ paper                          : Factor w/ 8 levels "choi_unpub","choi2018",..: 6 6 6 6 6 6 6 6 6 6 ...
#  $ MA_condition                   : Factor w/ 26 levels "1_Active","1_Control",..: 12 12 12 12 12 12 12 12 12 12 ...
#  $ ageday                         : num  103.3 86.9 105.3 129.8 107.3 ...
#  $ exp_look                       : num  13.1 18.4 15.1 7.1 5.8 9 18.9 6 31 42.9 ...
#  $ unexp_look                     : num  60 60 46.3 13.6 12 60 8 21 60 42.7 ...
#  $ sex                            : Factor w/ 2 levels "f","m": 2 2 2 1 1 1 2 2 2 1 ...
#  $ experiment                     : Factor w/ 6 levels "1","2","3","4",..: 1 1 1 1 1 1 1 1 1 1 ...
#  $ condition                      : Factor w/ 26 levels "1_active","1_control",..: 12 12 12 12 12 12 12 12 12 12 ...
#  $ task                           : Factor w/ 2 levels "constraints",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ training_yesno                 : Factor w/ 2 levels "no","yes": 1 1 1 1 1 1 1 1 1 1 ...
#  $ action_consequence             : Factor w/ 3 levels "location_change",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ actor_hand                     : Factor w/ 3 levels "bare","gloved",..: NA NA NA NA NA NA NA NA NA NA ...
#  $ agent_efficient_fam            : Factor w/ 2 levels "no","yes": NA NA NA NA NA NA NA NA NA NA ...
#  $ object_diff_size_huge          : Factor w/ 2 levels "no","yes": 1 1 1 1 1 1 1 1 1 1 ...
#  $ action_causal                  : Factor w/ 2 levels "no","yes": 1 1 1 1 1 1 1 1 1 1 ...
#  $ location_object_goal_ambiguous : Factor w/ 2 levels "no","yes": 2 2 2 2 2 2 2 2 2 2 ...
#  $ bothobjects_present_visible_fam: Factor w/ 3 levels "","no","yes": 3 3 3 3 3 3 3 3 3 3 ...
#  $ agent                          : Factor w/ 3 levels "hand","object",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ PI_group                       : Factor w/ 5 levels "desrochers","luo",..: 2 2 2 2 2 2 2 2 2 2 ...
#  $ look_pref                      : num  46.9 41.6 31.2 6.5 6.2 ...
# Check the distribution of the looking-preference score before modelling:
# overall histogram (A) and per-paper histograms (B). Data appear to be
# approximately normally distributed.
dist1 <- ggplot(ind.data, aes(x=look_pref)) +
  geom_histogram()+
  theme_cowplot(12)

dist2 <- ggplot(ind.data, aes(x=look_pref)) +
  geom_histogram()+
  facet_wrap(~paper,nrow=3)+
  theme_cowplot(12)

# Combine with patchwork: panel A narrow, panel B wide.
(dist1 | dist2)  + plot_layout(widths = c(1,4)) + plot_annotation(tag_levels = 'A')
# `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

Data appear to be approximately normally distributed, so there is no need to log-transform the looking times.

Plots

Looking times to the expected vs. unexpected events for all individual conditions.

# Boxplots of raw looking times to expected vs unexpected events, faceted
# by task/paper/condition. Grey lines connect the two trial types within
# an infant; black point and error bar give the cell mean ± SE.
plot1 <- ggplot(ind.data.long,
       aes(trial_type, looking_time, colour=paper))+
  theme_cowplot(12)+
  theme(legend.title = element_blank())+
  geom_boxplot(outlier.alpha = 0.2)+
  geom_point(alpha=0.2)+
  geom_line(aes(group=subj), alpha=0.2)+
  stat_summary(geom="point", fun="mean", colour="black")+
  # geom_errorbar(data = ind.data.summary, colour="red", position = position_dodge(width = 5), width = 0, aes(ymin=looking_time-se, ymax=looking_time+se)) +
  stat_summary(geom="errorbar", fun.data="mean_se", width=0.2, colour="black")+
  ylab("Looking Time (s)")+
  xlab("Condition")+
  facet_wrap(~task+paper+condition, nrow=6, labeller=label_wrap_gen())+
  theme(axis.text.x = element_text(angle = 90, hjust=0.95, vjust =0.2),
        legend.position="bottom")

plot1

Looking preference (unexpected - expected) for all individual conditions.

# Looking preference (unexpected - expected) per condition, goals task.
# Black point/error bar: condition mean ± SE; horizontal line at zero
# marks no preference.
plot2.goals <- ggplot(ind.data %>% filter(task == "goals"),
                 aes(condition, look_pref, fill=paper)) +
  geom_boxplot(outlier.alpha = 0.2, position = position_dodge2(preserve = "single"))+
  geom_hline(yintercept = 0)+
  geom_point(alpha=0.2)+
  stat_summary(geom="point", fun="mean", colour="black")+
  stat_summary(geom="errorbar", fun.data="mean_se", width=0.2, colour="black")+
  theme_cowplot(12)+
  facet_grid(~paper, scales = "free_x",space = "free", labeller = label_wrap_gen())+
  ylab("Looking preference (s) \n <- Expected ------- Unexpected ->")+
  coord_cartesian(ylim=c(-40,40))+
  theme(axis.text.x = element_text(angle = 90, hjust=0.95, vjust =0.2), legend.position = "none")+
  scale_fill_brewer(palette = "Dark2")

# Same plot for the constraints task (different palette, shared y-limits).
plot2.constraints <- ggplot(ind.data %>% filter(task == "constraints"),
                 aes(condition, look_pref, fill=paper)) +
  geom_boxplot(outlier.alpha = 0.2, position = position_dodge2(preserve = "single"))+
  geom_hline(yintercept = 0)+
  geom_point(alpha=0.2)+
  stat_summary(geom="point", fun="mean", colour="black")+
  stat_summary(geom="errorbar", fun.data="mean_se", width=0.2, colour="black")+
  theme_cowplot(12)+
  facet_grid(~paper,scales = "free_x",space = "free", labeller = label_wrap_gen())+
  ylab("")+
  xlab("")+
  coord_cartesian(ylim=c(-40,40))+
  theme(axis.text.x = element_text(angle = 90, hjust=0.95, vjust =0.2), legend.position = "none")+
  scale_fill_manual(values = wes_palette("Royal2"))


# Combine side by side with patchwork (goals panel twice as wide).
(plot2.goals | plot2.constraints) + plot_layout(widths = c(2, 1)) + plot_annotation(tag_levels = 'A')

Looking preference (unexpected - expected) vs age for all individual conditions.

# Looking preference as a function of age (days), with per-paper linear
# fits; the outer parentheses print the plot as it is assigned.
(plot3 <- ggplot(ind.data,
                 aes(ageday, look_pref, colour=paper)) +
  # geom_boxplot(outlier.alpha = 0.2, position = position_dodge2(preserve = "single"))+
  geom_hline(yintercept = 0)+
  geom_point()+
  geom_smooth(method="lm")+
  # stat_summary(geom="point", fun="mean", colour="black")+
  # stat_summary(geom="errorbar", fun.data="mean_se", width=0.2, colour="black")+
  theme_cowplot(12)+
  facet_grid(~task+paper)+
  ylab("Looking preference (s) \n <- Expected ------- Unexpected ->")+
  theme(axis.text.x = element_text(angle = 90, hjust=0.95, vjust =0.2)))
# `geom_smooth()` using formula 'y ~ x'

Confirmatory Analysis

Constraints Task

# Constraints-task subset for the confirmatory models.
constraints <- ind.data %>% filter(task =="constraints")

# Set reference levels for every factor entering the constraints models.
# With the summed contrasts set globally above, the LAST level of each
# factor is the one dropped from the model matrix; the explicit level
# order / relevel() calls control which level that is.
constraints$actor_hand <- factor(constraints$actor_hand, levels=c("bare", "gloved", "mittened")) # for summed contr last level is dropped
constraints$action_consequence <- relevel(constraints$action_consequence, ref = "state_change")
constraints$agent_efficient_fam <- relevel(constraints$agent_efficient_fam, ref = "yes")
constraints$training_yesno <- relevel(constraints$training_yesno, ref = "yes")
constraints$action_causal <- relevel(constraints$action_causal, ref = "yes")

Baseline Effect

# Baseline conditions: standard constraints-task conditions without a
# training manipulation.
constraints.baseline.data <- constraints %>%
  filter(condition %in% c("3_notraining",
                          "1_pickupglove",
                          "2_pickupbarehand"))

# Age-adjusted intercept model with a random intercept per condition:
# is there any looking preference at baseline?
constraints.b1 <- lmer(data = constraints.baseline.data,
                       formula = look_pref ~ 1 + ageday + (1|condition))
summary(constraints.b1)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: look_pref ~ 1 + ageday + (1 | condition)
#    Data: constraints.baseline.data
# 
# REML criterion at convergence: 366.4
# 
# Scaled residuals: 
#     Min      1Q  Median      3Q     Max 
# -2.7189 -0.5297 -0.0435  0.7252  2.2686 
# 
# Random effects:
#  Groups    Name        Variance Std.Dev.
#  condition (Intercept)  2.03    1.42    
#  Residual              25.37    5.04    
# Number of obs: 60, groups:  condition, 3
# 
# Fixed effects:
#             Estimate Std. Error      df t value Pr(>|t|)
# (Intercept)  -2.3930     9.0407 57.2416   -0.26     0.79
# ageday        0.0258     0.0832 56.0050    0.31     0.76
# 
# Correlation of Fixed Effects:
#        (Intr)
# ageday -0.993
# Residual diagnostic panels for the baseline model.
resid_panel(constraints.b1, plots = "all",
                          smoother = TRUE,
                          qqbands = TRUE)
# `geom_smooth()` using formula 'y ~ x'
# `geom_smooth()` using formula 'y ~ x'

# HTML summary table of the model (standardized estimates, t, df).
constraints.b1.table <- sjPlot::tab_model(constraints.b1,
                                           show.std =TRUE,
                                           show.stat=TRUE,
                                           show.df=TRUE)

# Model refit on standardized data, used for reporting standardized ß.
constraints.b1.beta <- summary(effectsize::standardize(constraints.b1))

# Combined results table: standardized betas, coefficient table, and the
# fixed-effect rows (3:4) of the profile confidence intervals.
constraints.baseline <- cbind(
  gen.beta(effectsize::standardize(constraints.b1)),
  gen.m(constraints.b1),
  gen.ci(constraints.b1)[3:4,]
) 
# Computing profile confidence intervals ...
constraints.baseline
#                                beta        b      se    df       t      p
# (Intercept) -0.00000000000000002645 -2.39296 9.04071 57.24 -0.2647 0.7922
# ageday       0.03966247939082377660  0.02582 0.08316 56.01  0.3105 0.7574
#                lower   upper
# (Intercept) -20.1694 15.4724
# ageday       -0.1389  0.1897
# Influence diagnostics.
# NOTE(review): the warning below says `group` was replaced by `level` in
# this cooks.distance() method, so `group = "subj"` is ignored and the
# distances are computed at the default level — consider
# cooks.distance(constraints.b1, level = "subj") and re-checking the
# subjects excluded below.
cooks1 <- cooks.distance(constraints.b1, group = "subj")
# Warning in cooks.distance.lmerMod(constraints.b1, group = "subj"): group is not
# a valid argument for this function. As of version 0.4.0, group has been replaced
# by level.
dotplot_diag(x = cooks1, cutoff = "internal", name = "cooks.distance",  index=constraints.baseline.data$subj) + ylab("Cook's distance") + xlab("subjID")

excluded.subs <- c("90", "553", "86") 

constraints.b1.cooks <- lmer(data = constraints.baseline.data %>%
                         filter(!subj %in% excluded.subs),
                       formula = look_pref ~ 1 + ageday + (1|condition))


summary(constraints.b1.cooks)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: look_pref ~ 1 + ageday + (1 | condition)
#    Data: constraints.baseline.data %>% filter(!subj %in% excluded.subs)
# 
# REML criterion at convergence: 338.6
# 
# Scaled residuals: 
#     Min      1Q  Median      3Q     Max 
# -2.5544 -0.5380 -0.0717  0.7259  2.4971 
# 
# Random effects:
#  Groups    Name        Variance Std.Dev.
#  condition (Intercept)  1.14    1.07    
#  Residual              21.61    4.65    
# Number of obs: 57, groups:  condition, 3
# 
# Fixed effects:
#             Estimate Std. Error      df t value Pr(>|t|)
# (Intercept)  -9.5734     9.0731 54.2948   -1.06     0.30
# ageday        0.0926     0.0838 53.5855    1.11     0.27
# 
# Correlation of Fixed Effects:
#        (Intr)
# ageday -0.995
# Effect plots for the influential-observations-removed baseline model.
plot(allEffects(constraints.b1.cooks))

# Same combined summary (beta / estimate / profile CI) as for the full-sample
# baseline model, for the sensitivity refit.
constraints.baseline.cooks <- cbind(
  gen.beta(effectsize::standardize(constraints.b1.cooks)),
  gen.m(constraints.b1.cooks),
  gen.ci(constraints.b1.cooks)[3:4,]
)
# Computing profile confidence intervals ...

For standard versions of the constraints task, we found no differential looking between the expected and unexpected events, Mean looking preference = 0.395 seconds, standard error (SE) = 0.663, [-0.139,0.19], ß=0.04, B=0.026, SE=0.083, p=0.757, two-tailed. This result held when we removed 3 influential observations, identified using Cook’s distance, [-0.07,0.263], ß=0.145, B=0.093, SE=0.084, p=0.274, two-tailed.

Intervention Analysis

# Intervention analysis (constraints task): predict looking preference from the
# intervention/design variables plus age; random intercepts for condition,
# experiment, and paper. The fit is singular (condition and experiment variance
# estimated at 0 — see output below), hence the messages.
constraints.m1 <- lmer(data = constraints,
     formula = look_pref ~ training_yesno + action_causal + action_consequence + actor_hand + agent_efficient_fam + ageday + (1|condition) + (1|experiment) + (1|paper))
# boundary (singular) fit: see ?isSingular
# Warning in as_lmerModLT(model, devfun): Model may not have converged with 1
# eigenvalue close to zero: -3.1e-11
summary(constraints.m1)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: look_pref ~ training_yesno + action_causal + action_consequence +  
#     actor_hand + agent_efficient_fam + ageday + (1 | condition) +  
#     (1 | experiment) + (1 | paper)
#    Data: constraints
# 
# REML criterion at convergence: 1682
# 
# Scaled residuals: 
#    Min     1Q Median     3Q    Max 
# -3.729 -0.510 -0.059  0.561  3.866 
# 
# Random effects:
#  Groups     Name        Variance Std.Dev.
#  condition  (Intercept)  0.000   0.000   
#  experiment (Intercept)  0.000   0.000   
#  paper      (Intercept)  0.121   0.347   
#  Residual               35.324   5.943   
# Number of obs: 264, groups:  condition, 12; experiment, 5; paper, 2
# 
# Fixed effects:
#                      Estimate Std. Error       df t value Pr(>|t|)    
# (Intercept)           -2.8929     4.6171 256.0000   -0.63  0.53151    
# training_yesno1        2.1859     0.6174 256.0000    3.54  0.00047 ***
# action_causal1         2.3185     0.5944 256.0000    3.90  0.00012 ***
# action_consequence1    1.0693     0.7763 256.0000    1.38  0.16959    
# actor_hand1            1.3018     1.0518 256.0000    1.24  0.21698    
# actor_hand2            0.8084     1.0518 256.0000    0.77  0.44282    
# agent_efficient_fam1   1.7116     0.5377 256.0000    3.18  0.00164 ** 
# ageday                 0.0231     0.0419 256.0000    0.55  0.58139    
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 
# Correlation of Fixed Effects:
#             (Intr) trnn_1 actn_cs1 actn_cn1 actr_1 actr_2 agn__1
# tranng_ysn1  0.062                                              
# action_csl1 -0.143  0.085                                       
# actn_cnsqn1 -0.010  0.065  0.349                                
# actor_hand1  0.124  0.227 -0.001    0.360                       
# actor_hand2 -0.060  0.227  0.000   -0.721   -0.597              
# agnt_ffcn_1 -0.092  0.314  0.275    0.210    0.000  0.000       
# ageday      -0.982 -0.017  0.053    0.036   -0.010 -0.008  0.018
# convergence code: 0
# boundary (singular) fit: see ?isSingular
# Residual diagnostics for the intervention model.
resid_panel(constraints.m1, plots = "all",
                          smoother = TRUE,
                          qqbands = TRUE)
# `geom_smooth()` using formula 'y ~ x'
# `geom_smooth()` using formula 'y ~ x'

# NOTE(review): `cooks1` is reused here, overwriting the baseline-model
# distances computed earlier — consider a distinct name to avoid accidental
# cross-section reuse. The deprecated `group` argument is again ignored
# (see warning below; replaced by `level` in HLMdiag 0.4.0).
cooks1 <- cooks.distance(constraints.m1, group = "subj")
# Warning in cooks.distance.lmerMod(constraints.m1, group = "subj"): group is not
# a valid argument for this function. As of version 0.4.0, group has been replaced
# by level.
dotplot_diag(x = cooks1, cutoff = "internal", name = "cooks.distance", index=constraints$subj) + ylab("Cook's distance") + xlab("ID")

# Forest plot of the fixed-effect estimates.
sjPlot::plot_model(constraints.m1,
                   type = "est",
                   colors = "bw",
                   sort.est=TRUE,
                   axis.title=c("Effect (Unexpected - Expected in seconds)"),
                   show.values=TRUE,
                   show.p=TRUE)

constraints.m1.table <- sjPlot:: tab_model(constraints.m1,
                                           show.std =TRUE,
                                           show.stat=TRUE,
                                           show.df=TRUE)
# boundary (singular) fit: see ?isSingular
constraints.m1.beta <- summary(effectsize::standardize(constraints.m1))
# boundary (singular) fit: see ?isSingular
# Combined summary: standardized betas, raw estimates, profile CIs for the
# 8 fixed effects. NOTE(review): with three random intercepts plus the
# residual, profile-CI output typically has 4 variance rows before the fixed
# effects — confirm that rows 3:10 of gen.ci really are the fixed-effect rows
# (cf. rows 4:11 used for goals.m1 later in this file).
constraints.interventions <- cbind(
  gen.beta(effectsize::standardize(constraints.m1)),
  gen.m(constraints.m1),
  gen.ci(constraints.m1)[3:10,]
)
# boundary (singular) fit: see ?isSingular
# Computing profile confidence intervals ...
# Keep observations with Cook's distance at or below the conventional 4/n
# cutoff (n = 264 observations in the constraints data).
constraints.cooks<- constraints[which(cooks1 <= 4/264),]

# where are the influential observations?
nrow(constraints)
# [1] 264
nrow(constraints.cooks)
# [1] 249
# Per paper/condition cell counts before exclusion...
constraints.summary <- constraints %>%
  group_by(paper,condition) %>%
  summarise(n = n())
# `summarise()` regrouping output by 'paper' (override with `.groups` argument)
# ...and after exclusion, to see which cells lost observations.
constraints.cooksn <- constraints.cooks %>%
  group_by(paper,condition) %>%
  summarise(n_cooks = n())
# `summarise()` regrouping output by 'paper' (override with `.groups` argument)
constraints.where.influential <- full_join(constraints.summary,constraints.cooksn) %>%
  mutate(n_excluded = n - n_cooks)
# Joining, by = c("paper", "condition")
knitr::kable(constraints.where.influential)
paper condition n n_cooks n_excluded
liu2019 1_pickupglove 20 18 2
liu2019 2_pickupbarehand 20 19 1
liu2019 3_statechangebarrier 20 19 1
liu2019 3_statechangenobarrier 20 17 3
liu2019 4_statechangenotcausal 20 17 3
liu2019 5_statechangecausal 26 26 0
liu2019 5_statechangenotcausal 26 23 3
skerry2013 1_effectiveactiontraining 20 20 0
skerry2013 2_ineffectiveactiontraining 20 20 0
skerry2013 3_notraining 20 19 1
skerry2013 4_constrainedactionhabituation 26 26 0
skerry2013 5_unconstrainedactionhabituation 26 25 1
# Intervention model refit on the Cook's-distance-filtered data.
# REML=FALSE (ML fit) so that likelihood-based model comparison is valid.
constraints.m1.cooks <- lmer(data = constraints.cooks,
     formula = look_pref ~ training_yesno + action_causal + action_consequence + actor_hand + agent_efficient_fam + ageday + (1|condition) + (1|experiment) + (1|paper),
     REML=FALSE)
# boundary (singular) fit: see ?isSingular
summary(constraints.m1.cooks)
# Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
#   method [lmerModLmerTest]
# Formula: look_pref ~ training_yesno + action_causal + action_consequence +  
#     actor_hand + agent_efficient_fam + ageday + (1 | condition) +  
#     (1 | experiment) + (1 | paper)
#    Data: constraints.cooks
# 
#      AIC      BIC   logLik deviance df.resid 
#   1484.8   1527.0   -730.4   1460.8      237 
# 
# Scaled residuals: 
#     Min      1Q  Median      3Q     Max 
# -2.9748 -0.6479 -0.0429  0.6569  3.1180 
# 
# Random effects:
#  Groups     Name        Variance Std.Dev.
#  condition  (Intercept)  0.0     0.00    
#  experiment (Intercept)  0.0     0.00    
#  paper      (Intercept)  0.0     0.00    
#  Residual               20.7     4.55    
# Number of obs: 249, groups:  condition, 12; experiment, 5; paper, 2
# 
# Fixed effects:
#                      Estimate Std. Error       df t value   Pr(>|t|)    
# (Intercept)           -4.0078     3.6877 249.0000   -1.09      0.278    
# training_yesno1        1.9535     0.4773 249.0000    4.09 0.00005773 ***
# action_causal1         2.5634     0.4768 249.0000    5.38 0.00000017 ***
# action_consequence1    1.2255     0.6202 249.0000    1.98      0.049 *  
# actor_hand1            0.7581     0.8187 249.0000    0.93      0.355    
# actor_hand2            0.9867     0.8311 249.0000    1.19      0.236    
# agent_efficient_fam1   1.9383     0.4261 249.0000    4.55 0.00000844 ***
# ageday                 0.0281     0.0333 249.0000    0.84      0.400    
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 
# Correlation of Fixed Effects:
#             (Intr) trnn_1 actn_cs1 actn_cn1 actr_1 actr_2 agn__1
# tranng_ysn1  0.061                                              
# action_csl1 -0.161  0.076                                       
# actn_cnsqn1 -0.007  0.058  0.330                                
# actor_hand1  0.120  0.226 -0.001    0.377                       
# actor_hand2 -0.047  0.223 -0.002   -0.744   -0.644              
# agnt_ffcn_1 -0.124  0.313  0.248    0.190    0.000 -0.001       
# ageday      -0.983 -0.018  0.070    0.035   -0.009 -0.025  0.050
# convergence code: 0
# boundary (singular) fit: see ?isSingular
# Standardized version of the filtered intervention model: outcome and the
# continuous predictor (ageday) are z-scored via scale(), so estimates are in
# SD units (the factor contrasts are unchanged).
constraints.m1.cooks.beta <- lmer(data = constraints.cooks,
     formula = scale(look_pref) ~ training_yesno + action_causal + action_consequence + actor_hand + agent_efficient_fam + scale(ageday) + (1|condition) + (1|experiment) + (1|paper),
     REML=FALSE)
# boundary (singular) fit: see ?isSingular
summary(constraints.m1.cooks.beta)
# Linear mixed model fit by maximum likelihood . t-tests use Satterthwaite's
#   method [lmerModLmerTest]
# Formula: 
# scale(look_pref) ~ training_yesno + action_causal + action_consequence +  
#     actor_hand + agent_efficient_fam + scale(ageday) + (1 | condition) +  
#     (1 | experiment) + (1 | paper)
#    Data: constraints.cooks
# 
#      AIC      BIC   logLik deviance df.resid 
#    683.5    725.7   -329.8    659.5      237 
# 
# Scaled residuals: 
#     Min      1Q  Median      3Q     Max 
# -2.9748 -0.6479 -0.0429  0.6569  3.1180 
# 
# Random effects:
#  Groups     Name        Variance Std.Dev.
#  condition  (Intercept) 0.000    0.00    
#  experiment (Intercept) 0.000    0.00    
#  paper      (Intercept) 0.000    0.00    
#  Residual               0.828    0.91    
# Number of obs: 249, groups:  condition, 12; experiment, 5; paper, 2
# 
# Fixed effects:
#                      Estimate Std. Error       df t value   Pr(>|t|)    
# (Intercept)           -0.3453     0.1338 249.0000   -2.58      0.010 *  
# training_yesno1        0.3908     0.0955 249.0000    4.09 0.00005773 ***
# action_causal1         0.5129     0.0954 249.0000    5.38 0.00000017 ***
# action_consequence1    0.2452     0.1241 249.0000    1.98      0.049 *  
# actor_hand1            0.1517     0.1638 249.0000    0.93      0.355    
# actor_hand2            0.1974     0.1663 249.0000    1.19      0.236    
# agent_efficient_fam1   0.3878     0.0853 249.0000    4.55 0.00000844 ***
# scale(ageday)          0.0490     0.0580 249.0000    0.84      0.400    
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 
# Correlation of Fixed Effects:
#             (Intr) trnn_1 actn_cs1 actn_cn1 actr_1 actr_2 agn__1
# tranng_ysn1  0.238                                              
# action_csl1 -0.510  0.076                                       
# actn_cnsqn1  0.151  0.058  0.330                                
# actor_hand1  0.613  0.226 -0.001    0.377                       
# actor_hand2 -0.393  0.223 -0.002   -0.744   -0.644              
# agnt_ffcn_1 -0.415  0.313  0.248    0.190    0.000 -0.001       
# scale(agdy) -0.058 -0.018  0.070    0.035   -0.009 -0.025  0.050
# convergence code: 0
# boundary (singular) fit: see ?isSingular
# Marginal predicted values for each fixed effect of the standardized model.
# NOTE(review): the axis title says "in seconds" but this model's outcome is
# scaled (SD units) — consider updating the label.
sjPlot::plot_model(constraints.m1.cooks.beta,
                   type = "pred",
                   # colors = "bw",
                   sort.est=TRUE,
                   axis.title=c("Effect (Unexpected - Expected in seconds)"),
                   show.values=TRUE,
                   show.p=TRUE)
# $training_yesno

# 
# $action_causal

# 
# $action_consequence

# 
# $actor_hand

# 
# $agent_efficient_fam

# 
# $ageday

# Effect plots for the standardized, filtered model.
plot(allEffects(constraints.m1.cooks.beta))

# Publication forest plot with human-readable predictor labels.
# NOTE(review): axis.labels are matched to the sorted estimates by position —
# if sort.est or the model terms change, these labels must be re-checked.
regressionplot1<- sjPlot::plot_model(constraints.m1.cooks.beta,
                   type = "est",
                   colors = "bw",
                   sort.est=TRUE,
                   title = "",
                   axis.labels= c("Age in days",
                                  "Hand (bare)",
                                  "Hand (gloved)",
                                  "State change",
                                  "Actor efficient during habituation",
                                  "Sticky mittens training",
                                  "Action on contact"),
                   axis.title=c("Effect on looking (unexpected - expected) in standard deviations"),
                   show.values=TRUE,
                   show.p=TRUE)

# Combined summary (beta / estimate / profile CI) for the filtered model;
# same row-index caveat as for constraints.interventions above.
constraints.interventions.cooks <- cbind(
  gen.beta(effectsize::standardize(constraints.m1.cooks)),
  gen.m(constraints.m1.cooks),
  gen.ci(constraints.m1.cooks)[3:10,]
)
# boundary (singular) fit: see ?isSingular
# Computing profile confidence intervals ...
sjPlot:: tab_model(constraints.m1.cooks.beta, show.stat=TRUE)
  scale(look pref)
Predictors Estimates CI Statistic p
(Intercept) -0.35 -0.61 – -0.08 -2.58 0.010
training_yesno1 0.39 0.20 – 0.58 4.09 <0.001
action_causal1 0.51 0.33 – 0.70 5.38 <0.001
action_consequence1 0.25 0.00 – 0.49 1.98 0.048
actor_hand1 0.15 -0.17 – 0.47 0.93 0.354
actor_hand2 0.20 -0.13 – 0.52 1.19 0.235
agent_efficient_fam1 0.39 0.22 – 0.55 4.55 <0.001
ageday 0.05 -0.06 – 0.16 0.84 0.399
Random Effects
σ2 0.83
τ00 condition 0.00
τ00 experiment 0.00
τ00 paper 0.00
N condition 12
N experiment 5
N paper 2
Observations 249
Marginal R2 / Conditional R2 0.170 / NA

Generating BFs

# Bayes Factors (filtered data): for each fixed effect, refit the full ML model
# without that effect and compute BF = exp((BIC(reduced) - BIC(full))/2)
# (Wagenmakers, 2007). BF > 1 favours keeping the predictor.
constraints.full.cooks <- lmer(data = constraints.cooks,
     formula = look_pref ~ training_yesno + action_causal + action_consequence + actor_hand + agent_efficient_fam + ageday + (1|condition) + (1|experiment) + (1|paper), REML=FALSE,control = lmerControl(optimizer="Nelder_Mead",optCtrl=list(maxfun=200000))) # REML set to false to enable model comparison via likelihood methods, change optimizer to Nelder Mead to deal with failures to converge
# boundary (singular) fit: see ?isSingular
constraints.predictors=c("training_yesno","action_causal","action_consequence","actor_hand","agent_efficient_fam","ageday")

# Results table: one row per fixed effect, filled in by the loop below.
constraints.bf.cooks <- data.frame(constraints.predictors) %>%
  mutate(BF.cooks = NA,
         Interpretation.Cooks = NA) %>%
  rename(Fixed.Effect = constraints.predictors)

for (predictor in constraints.predictors) {
  # Drop the current predictor from the full model (". ~ . -<predictor>").
  modelform <- update(constraints.full.cooks, as.formula(paste0(". ~ . -",predictor)))
  BF <- exp((BIC(modelform) - BIC(constraints.full.cooks))/2)
  whichrow <- which(constraints.bf.cooks$Fixed.Effect == as.character(predictor))
  constraints.bf.cooks[whichrow,2] <- BF
  # interpret_bf (effectsize) maps the BF to a verbal evidence category.
  constraints.bf.cooks[whichrow,3] <- interpret_bf(BF)
}
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
constraints.bf.cooks
#          Fixed.Effect   BF.cooks              Interpretation.Cooks
# 1      training_yesno   30.28080 very strong evidence in favour of
# 2       action_causal 1312.97590     extreme evidence in favour of
# 3  action_consequence    0.42824        anecdotal evidence against
# 4          actor_hand    0.08219           strong evidence against
# 5 agent_efficient_fam  261.64453     extreme evidence in favour of
# 6              ageday    0.09043           strong evidence against
# Bayes Factors for the full (unfiltered) dataset, same BIC-based procedure as
# for the filtered data above.
constraints.full <- lmer(data = constraints,
     formula = look_pref ~ training_yesno + action_causal + action_consequence + actor_hand + agent_efficient_fam + ageday + (1|condition) + (1|experiment) + (1|paper), REML=FALSE,control = lmerControl(optimizer="Nelder_Mead",optCtrl=list(maxfun=200000))) # REML set to false to enable model comparison via likelihood methods, change optimizer to Nelder Mead to deal with failures to converge
# boundary (singular) fit: see ?isSingular
# NOTE(review): constraints.predictors is redefined identically here (already
# set above) — harmless, but one definition would suffice.
constraints.predictors=c("training_yesno","action_causal","action_consequence","actor_hand","agent_efficient_fam","ageday")

constraints.bf.full <- data.frame(constraints.predictors) %>%
  mutate(BF = NA,
         Interpretation = NA) %>%
  rename(Fixed.Effect = constraints.predictors)

for (predictor in constraints.predictors) {
  # Refit without the current predictor and compare BICs.
  modelform <- update(constraints.full, as.formula(paste0(". ~ . -",predictor)))
  BF <- exp((BIC(modelform) - BIC(constraints.full))/2)
  whichrow <- which(constraints.bf.full$Fixed.Effect == as.character(predictor))
  constraints.bf.full[whichrow,2] <- BF
  constraints.bf.full[whichrow,3] <- interpret_bf(BF)

}
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
# boundary (singular) fit: see ?isSingular
constraints.bf.full
#          Fixed.Effect       BF                    Interpretation
# 1      training_yesno  9.62491    moderate evidence in favour of
# 2       action_causal 56.48817 very strong evidence in favour of
# 3  action_consequence  0.16312         moderate evidence against
# 4          actor_hand  0.07069           strong evidence against
# 5 agent_efficient_fam  9.85298    moderate evidence in favour of
# 6              ageday  0.07605           strong evidence against
# combine BF tables
# Join full-sample and filtered-sample BFs on Fixed.Effect, strongest first.
constraints.bf <- full_join(constraints.bf.full, constraints.bf.cooks) %>%
  arrange(desc(BF))
# Joining, by = "Fixed.Effect"

Exploratory Analysis

# Exploratory model: drops actor_hand and ageday, and uses a random slope of
# condition within paper instead of separate random intercepts.
constraints.e1 <- lmer(data = constraints,
     formula = look_pref ~ training_yesno + action_causal + action_consequence + agent_efficient_fam + (1+condition|paper))

summary(constraints.e1)

When we compared the effects of different variants we found that motor training increased infants’ looking preference for the unexpected event ([5.387,6.39], ß=0.354, B=2.186, SE=0.617, p<.001, two-tailed). We also found effects of other interventions on the actions infants saw, including seeing an action that causes an effect on contact ([-11.821,6.036], ß=0.375, B=2.318, SE=0.594, p<.001, two-tailed). We did not find an effect of age, [-1.205,2.821], ß=0.033, B=0.023, SE=0.042, p=0.581, two-tailed. This analysis included 264 total infants, 15 of whom were classified as influential by Cook’s Distance. When these influential participants were excluded from the analysis, we found qualitatively equivalent results, except that the effect of seeing an action that resulted in a state change (versus location change) crossed the threshold into significance ([1.014,2.892], ß=0.245, B=1.226, SE=0.62, p=0.049, two-tailed).

The presence or absence of a significant fixed effect in a frequentist setting does not map directly to strength of evidence for that fixed effect. Thus, we computed Bayes Factors for each of these fixed effects, which express the strength of evidence for H1 (i.e. the hypothesis that the fixed effect has predictive power in the model) versus H0 (i.e. the hypothesis that the fixed effect has no predictive power in the model). For the full model look_pref ~ training_yesno + action_causal + action_consequence + actor_hand + agent_efficient_fam + ageday + (1|condition) + (1|experiment) + (1|paper), excluding influential observations, we iteratively refit the model excluding each fixed effect, and computed the Bayes Factor that expresses the strength of evidence for the full model vs the model excluding this predictor. We used model BICs to compute Bayes Factors using the following formula exp((BIC(modelminusfixedeffect) - BIC(fullmodel))/2) (Wagenmakers, 2007). The Bayes Factor associated with each fixed effect is shown in Table X. There was very strong evidence that seeing a causal action predicted infants’ responses at test (BF = 56.4882). There was moderate evidence for the hypothesis that sticky mittens training (BF = 9.6249), and seeing an efficient agent initially (BF = 9.853), predicted infants’ violation-of-expectation responses at test. There was moderate evidence for the null hypothesis (i.e. for the absence of this effect) for the predictors of seeing a state change vs pickup action (BF = 0.1631), and strong evidence for the null hypothesis that seeing bare vs gloved vs mittened hands (BF = 0.0707), and infant age (BF = 0.076) affected infants’ looking preference at test.

# Render the combined Bayes Factor table (Table X in the text above).
knitr::kable(constraints.bf)
Fixed.Effect BF Interpretation BF.cooks Interpretation.Cooks
action_causal 56.4882 very strong evidence in favour of 1312.9759 extreme evidence in favour of
agent_efficient_fam 9.8530 moderate evidence in favour of 261.6445 extreme evidence in favour of
training_yesno 9.6249 moderate evidence in favour of 30.2808 very strong evidence in favour of
action_consequence 0.1631 moderate evidence against 0.4282 anecdotal evidence against
ageday 0.0760 strong evidence against 0.0904 strong evidence against
actor_hand 0.0707 strong evidence against 0.0822 strong evidence against

Goals Task

Baseline Effect

# Goals task: subset the individual-level data and set factor reference levels
# so model estimates are interpretable (summed contrasts are set globally).
goals <- ind.data %>% filter(task =="goals")

goals$action_consequence <- relevel(goals$action_consequence, ref = "none")
goals$agent <- factor(goals$agent, levels=c("object", "person", "hand")) # last level gets dropped in model estimates
goals$training_yesno <- relevel(goals$training_yesno, ref = "yes")
goals$bothobjects_present_visible_fam <- relevel(goals$bothobjects_present_visible_fam, ref = "yes")
# Baseline conditions: the standard (control / two-object) conditions from the
# two baseline papers.
goals.baseline.data <- goals %>%
  filter(condition %in% c("1_control",
                          "1_twoobject") &
           paper %in% c("gerson2014a",
                        "luo2011"))

# Baseline model for the goals task: looking preference vs age, random
# intercept for condition (parallel to constraints.b1 above).
goals.b1 <- lmer(data = goals.baseline.data,
                       formula = look_pref ~ 1 + ageday + (1|condition))

# Regression table and standardized coefficients for the goals baseline model.
goals.b1.table <- sjPlot:: tab_model(goals.b1,
                                           show.std =TRUE,
                                           show.stat=TRUE,
                                           show.df=TRUE)

goals.b1.beta <- summary(effectsize::standardize(goals.b1))

# Combined summary (beta / estimate / profile CI rows for the fixed effects).
goals.baseline <- cbind(
  gen.beta(effectsize::standardize(goals.b1)),
  gen.m(goals.b1),
  gen.ci(goals.b1)[3:4,]
)
# Computing profile confidence intervals ...
# Influence diagnostics; as before, `group` is deprecated/ignored (see warning)
# — should be `level` per HLMdiag >= 0.4.0.
cooks1 <- cooks.distance(goals.b1, group = "subj")
# Warning in cooks.distance.lmerMod(goals.b1, group = "subj"): group is not a
# valid argument for this function. As of version 0.4.0, group has been replaced
# by level.
dotplot_diag(x = cooks1, cutoff = "internal", name = "cooks.distance",  index=goals.baseline.data$subj) + ylab("Cook's distance") + xlab("subjID")

# Influential subjects flagged in the dotplot above.
# NOTE(review): `excluded.subs` overwrites the constraints-task vector of the
# same name — fine if sections run in order, but fragile if run out of order.
excluded.subs <- c("2", "7", "6", "11")

# Sensitivity refit of the goals baseline model without influential subjects.
goals.b1.cooks <- lmer(data = goals.baseline.data %>%
                         filter(!subj %in% excluded.subs),
                       formula = look_pref ~ 1 + ageday + (1|condition))

summary(goals.b1.cooks)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: look_pref ~ 1 + ageday + (1 | condition)
#    Data: goals.baseline.data %>% filter(!subj %in% excluded.subs)
# 
# REML criterion at convergence: 239.2
# 
# Scaled residuals: 
#     Min      1Q  Median      3Q     Max 
# -1.6626 -0.5931  0.0995  0.4841  2.8202 
# 
# Random effects:
#  Groups    Name        Variance Std.Dev.
#  condition (Intercept) 202      14.2    
#  Residual              106      10.3    
# Number of obs: 32, groups:  condition, 2
# 
# Fixed effects:
#             Estimate Std. Error     df t value Pr(>|t|)
# (Intercept)   44.720     27.063 21.547    1.65     0.11
# ageday        -0.355      0.236 29.073   -1.51     0.14
# 
# Correlation of Fixed Effects:
#        (Intr)
# ageday -0.925
# Combined summary for the sensitivity refit. The profile warnings below
# indicate a poorly behaved likelihood profile for the intercept (n = 32,
# only 2 conditions), so its CI falls back to linear interpolation.
goals.baseline.cooks <- cbind(
  gen.beta(effectsize::standardize(goals.b1.cooks)),
  gen.m(goals.b1.cooks),
  gen.ci(goals.b1.cooks)[3:4,]
)
# Computing profile confidence intervals ...
# Warning in nextpar(mat, cc, i, delta, lowcut, upcut): unexpected decrease in
# profile: using minstep
# Warning in profile.merMod(object, which = parm, signames = oldNames, ...): non-
# monotonic profile for (Intercept)
# Warning in confint.thpr(pp, level = level, zeta = zeta): bad spline fit for
# (Intercept): falling back to linear interpolation

For standard versions of the goals task, we found differential looking between the expected and unexpected events, Mean looking preference = 3.918 seconds, standard error (SE) = 2.807, [-0.931,-0.081], ß=-0.318, B=-0.503, SE=0.214, p=0.025, two-tailed. However, this was driven by 4 influential observations - without these 4 observations, there was no longer a reliable looking preference, [-0.815,0.126], ß=-0.21, B=-0.355, SE=0.236, p=0.143, two-tailed.

Intervention Analysis

# Intervention analysis (goals task). Nelder-Mead optimizer to help
# convergence; fit is still singular (paper/experiment variances ~0 below).
# See the comments after the output for why two pre-registered predictors
# (action_causal, object_diff_size_huge) were dropped.
goals.m1 <- lmer(data = goals,
     formula = look_pref ~ training_yesno  + action_consequence + location_object_goal_ambiguous + agent + bothobjects_present_visible_fam + ageday + (1|condition) + (1|experiment) + (1|paper),
     control = lmerControl(optimizer="Nelder_Mead"))
# boundary (singular) fit: see ?isSingular
summary(goals.m1)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: 
# look_pref ~ training_yesno + action_consequence + location_object_goal_ambiguous +  
#     agent + bothobjects_present_visible_fam + ageday + (1 | condition) +  
#     (1 | experiment) + (1 | paper)
#    Data: goals
# Control: lmerControl(optimizer = "Nelder_Mead")
# 
# REML criterion at convergence: 3022
# 
# Scaled residuals: 
#    Min     1Q Median     3Q    Max 
# -4.137 -0.456 -0.040  0.412  2.956 
# 
# Random effects:
#  Groups     Name        Variance      Std.Dev. 
#  condition  (Intercept)  16.053298091  4.006657
#  paper      (Intercept)   0.000000112  0.000335
#  experiment (Intercept)   0.000000778  0.000882
#  Residual               149.271235370 12.217661
# Number of obs: 386, groups:  condition, 14; paper, 6; experiment, 3
# 
# Fixed effects:
#                                  Estimate Std. Error       df t value Pr(>|t|)
# (Intercept)                        9.5238     5.8857 143.0053    1.62    0.108
# training_yesno1                    1.9406     2.2456   5.3129    0.86    0.425
# action_consequence1                2.9489     2.2291   9.9009    1.32    0.216
# location_object_goal_ambiguous1    4.7807     2.1749   9.5526    2.20    0.054
# agent1                             6.4554     2.6278  21.1836    2.46    0.023
# agent2                            -1.0533     1.7516  15.3225   -0.60    0.556
# bothobjects_present_visible_fam1   5.0575     1.9463  15.5349    2.60    0.020
# ageday                            -0.0677     0.0507 376.2822   -1.34    0.182
#                                   
# (Intercept)                       
# training_yesno1                   
# action_consequence1               
# location_object_goal_ambiguous1  .
# agent1                           *
# agent2                            
# bothobjects_present_visible_fam1 *
# ageday                            
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 
# Correlation of Fixed Effects:
#             (Intr) trnn_1 actn_1 lc___1 agent1 agent2 bt___1
# tranng_ysn1  0.292                                          
# actn_cnsqn1 -0.216 -0.003                                   
# lctn_bjc__1 -0.001 -0.001  0.630                            
# agent1      -0.001  0.287 -0.005 -0.167                     
# agent2      -0.013  0.430  0.262  0.085 -0.246              
# bthbjct___1 -0.131  0.003  0.270  0.160  0.563 -0.215       
# ageday      -0.898 -0.045  0.061  0.031  0.019  0.042 -0.059
# convergence code: 0
# boundary (singular) fit: see ?isSingular
# originally pre-registered model was rank deficient, needed to drop 2 variables
# dropped action_causal because it is redundant with action_consequence (i.e. there are no non-causal actions that are state changes)
# dropped object_diff_size_huge also because it is almost completely redundant with agent (i.e. all studies with a full human agent also are "yes" for "object_diff_size_huge")

# aa <- allFit(goals.m1)

# NOTE(review): this repeats the summary(goals.m1) call above verbatim (its
# output is identical) — likely left over from the allFit experiment; one of
# the two calls can be removed.
summary(goals.m1)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: 
# look_pref ~ training_yesno + action_consequence + location_object_goal_ambiguous +  
#     agent + bothobjects_present_visible_fam + ageday + (1 | condition) +  
#     (1 | experiment) + (1 | paper)
#    Data: goals
# Control: lmerControl(optimizer = "Nelder_Mead")
# 
# REML criterion at convergence: 3022
# 
# Scaled residuals: 
#    Min     1Q Median     3Q    Max 
# -4.137 -0.456 -0.040  0.412  2.956 
# 
# Random effects:
#  Groups     Name        Variance      Std.Dev. 
#  condition  (Intercept)  16.053298091  4.006657
#  paper      (Intercept)   0.000000112  0.000335
#  experiment (Intercept)   0.000000778  0.000882
#  Residual               149.271235370 12.217661
# Number of obs: 386, groups:  condition, 14; paper, 6; experiment, 3
# 
# Fixed effects:
#                                  Estimate Std. Error       df t value Pr(>|t|)
# (Intercept)                        9.5238     5.8857 143.0053    1.62    0.108
# training_yesno1                    1.9406     2.2456   5.3129    0.86    0.425
# action_consequence1                2.9489     2.2291   9.9009    1.32    0.216
# location_object_goal_ambiguous1    4.7807     2.1749   9.5526    2.20    0.054
# agent1                             6.4554     2.6278  21.1836    2.46    0.023
# agent2                            -1.0533     1.7516  15.3225   -0.60    0.556
# bothobjects_present_visible_fam1   5.0575     1.9463  15.5349    2.60    0.020
# ageday                            -0.0677     0.0507 376.2822   -1.34    0.182
#                                   
# (Intercept)                       
# training_yesno1                   
# action_consequence1               
# location_object_goal_ambiguous1  .
# agent1                           *
# agent2                            
# bothobjects_present_visible_fam1 *
# ageday                            
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 
# Correlation of Fixed Effects:
#             (Intr) trnn_1 actn_1 lc___1 agent1 agent2 bt___1
# tranng_ysn1  0.292                                          
# actn_cnsqn1 -0.216 -0.003                                   
# lctn_bjc__1 -0.001 -0.001  0.630                            
# agent1      -0.001  0.287 -0.005 -0.167                     
# agent2      -0.013  0.430  0.262  0.085 -0.246              
# bthbjct___1 -0.131  0.003  0.270  0.160  0.563 -0.215       
# ageday      -0.898 -0.045  0.061  0.031  0.019  0.042 -0.059
# convergence code: 0
# boundary (singular) fit: see ?isSingular
# Forest plot of the goals intervention-model fixed effects.
sjPlot::plot_model(goals.m1,
                   type = "est",
                   colors = "bw",
                   sort.est=TRUE,
                   axis.title=c("Effect (Unexpected - Expected in seconds)"),
                   show.values=TRUE,
                   show.p=TRUE)

# HTML table of estimates, CIs, and test statistics.
sjPlot:: tab_model(goals.m1, show.stat=TRUE)
  look pref
Predictors Estimates CI Statistic p
(Intercept) 9.52 -2.01 – 21.06 1.62 0.106
training_yesno1 1.94 -2.46 – 6.34 0.86 0.387
action_consequence1 2.95 -1.42 – 7.32 1.32 0.186
location_object_goal_ambiguous1 4.78 0.52 – 9.04 2.20 0.028
agent1 6.46 1.30 – 11.61 2.46 0.014
agent2 -1.05 -4.49 – 2.38 -0.60 0.548
bothobjects_present_visible_fam1 5.06 1.24 – 8.87 2.60 0.009
ageday -0.07 -0.17 – 0.03 -1.34 0.182
Random Effects
σ2 149.27
τ00 condition 16.05
τ00 paper 0.00
τ00 experiment 0.00
ICC 0.10
N condition 14
N experiment 3
N paper 6
Observations 386
Marginal R2 / Conditional R2 0.092 / 0.181
# Effect plots for every fixed effect of the full-sample model
plot(allEffects(goals.m1))

# Combine standardized coefficients, raw estimates, and profile CIs into one
# coefficient table. gen.beta()/gen.m()/gen.ci() are project helper functions
# defined earlier in the file (not visible in this excerpt); rows 4:11 of the
# CI table presumably drop the random-effect / sigma rows so that only the
# 8 fixed effects remain -- TODO confirm against gen.ci()'s definition.
goals.interventions<- cbind(
  gen.beta(effectsize::standardize(goals.m1)),
  gen.m(goals.m1),
  gen.ci(goals.m1)[4:11,]
) 
# boundary (singular) fit: see ?isSingular
# Computing profile confidence intervals ...
# Cook's distance for each observation of the goals model.
# The original call passed `group = "subj"`, which HLMdiag >= 0.4.0 rejects
# with a warning ("group ... has been replaced by level") and ignores, so the
# values were computed at the observation level anyway. Dropping the invalid
# argument yields the same result without the warning.
cooks.goals <- cooks.distance(goals.m1)

# Visualize the distances against the conventional 4/n cutoff, labelled by subject
dotplot_diag(x = cooks.goals, cutoff = "internal", name = "cooks.distance", index = goals$subj) +
  ylab("Cook's distance") + xlab("subjID")

# Keep only non-influential observations (Cook's distance <= 4/n);
# 4/nrow(goals) replaces the hard-coded 4/386 so the cutoff tracks the data.
goals.cooks <- goals[which(cooks.goals <= 4 / nrow(goals)), ]


# where are the influential observations?
nrow(goals)
# [1] 386
nrow(goals.cooks)
# [1] 367
# [1] 367
# Per paper x condition cell sizes before exclusion.
# `.groups = "drop"` makes the output grouping explicit and silences the
# "`summarise()` regrouping output" message.
goals.summary <- goals %>%
  group_by(paper, condition) %>%
  summarise(n = n(), .groups = "drop")

# Cell sizes after excluding influential observations
goals.cooksn <- goals.cooks %>%
  group_by(paper, condition) %>%
  summarise(n_cooks = n(), .groups = "drop")

# Join both counts and compute how many observations each cell lost.
# The join key is spelled out rather than relying on the implicit
# "Joining, by = ..." message.
goals.where.influential <- full_join(goals.summary, goals.cooksn,
                                     by = c("paper", "condition")) %>%
  mutate(n_excluded = n - n_cooks)
knitr::kable(goals.where.influential)
paper condition n n_cooks n_excluded
choi_unpub 1_equidistant 24 24 0
choi_unpub 1_fartarget 24 23 1
choi_unpub 1_neartarget 24 23 1
choi2018 1_hidden 16 16 0
choi2018 1_oneobject 16 15 1
choi2018 1_twoobject 16 16 0
gerson2014a 1_active 24 24 0
gerson2014a 1_control 24 24 0
gerson2014a 1_observation 24 24 0
gerson2014b 1_active 30 30 0
gerson2014b 1_observation 30 30 0
gerson2014b 2_generalization 30 30 0
luo2011 1_oneobject 12 7 5
luo2011 1_twoobject 12 6 6
luo2011 2_differentpositions 12 7 5
woo_unpub 1_statechange_objectswap 20 20 0
woo_unpub 2_disambiguatingobjectgoal 24 24 0
woo_unpub 3_disambiguatinglocationgoal 24 24 0
# Refit the goals model on the data with influential observations removed
# (same fixed/random structure as goals.m1; Nelder-Mead retained to avoid
# the convergence failures noted earlier in the file).
goals.m1.cooks <- lmer(data = goals.cooks,
     formula = look_pref ~ training_yesno  + action_consequence + location_object_goal_ambiguous + agent + bothobjects_present_visible_fam + ageday + (1|condition) + (1|experiment) + (1|paper),
     control = lmerControl(optimizer="Nelder_Mead"))
# boundary (singular) fit: see ?isSingular
# Same model with a standardized outcome: scale(look_pref) puts the
# coefficients on a standard-deviation scale (used for the "beta" plot
# and table below).
goals.m1.cooks.beta <- lmer(data = goals.cooks,
     formula = scale(look_pref) ~ training_yesno  + action_consequence + location_object_goal_ambiguous + agent + bothobjects_present_visible_fam + ageday + (1|condition) + (1|experiment) + (1|paper),
     control = lmerControl(optimizer="Nelder_Mead"))
# boundary (singular) fit: see ?isSingular
summary(goals.m1.cooks.beta)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: 
# scale(look_pref) ~ training_yesno + action_consequence + location_object_goal_ambiguous +  
#     agent + bothobjects_present_visible_fam + ageday + (1 | condition) +  
#     (1 | experiment) + (1 | paper)
#    Data: goals.cooks
# Control: lmerControl(optimizer = "Nelder_Mead")
# 
# REML criterion at convergence: 1037
# 
# Scaled residuals: 
#    Min     1Q Median     3Q    Max 
# -3.596 -0.483 -0.053  0.466  3.468 
# 
# Random effects:
#  Groups     Name        Variance Std.Dev.
#  condition  (Intercept) 0.0828   0.288   
#  paper      (Intercept) 0.0000   0.000   
#  experiment (Intercept) 0.0000   0.000   
#  Residual               0.9035   0.951   
# Number of obs: 367, groups:  condition, 14; paper, 6; experiment, 3
# 
# Fixed effects:
#                                   Estimate Std. Error        df t value
# (Intercept)                        0.50891    0.47116 160.07033    1.08
# training_yesno1                    0.17265    0.16407   4.71908    1.05
# action_consequence1                0.23160    0.17185   9.48200    1.35
# location_object_goal_ambiguous1    0.45104    0.17221   9.84636    2.62
# agent1                             0.42770    0.23344  24.13825    1.83
# agent2                            -0.05938    0.14395  17.47256   -0.41
# bothobjects_present_visible_fam1   0.23742    0.15967  11.46166    1.49
# ageday                            -0.00256    0.00409 358.01490   -0.63
#                                  Pr(>|t|)  
# (Intercept)                         0.282  
# training_yesno1                     0.344  
# action_consequence1                 0.209  
# location_object_goal_ambiguous1     0.026 *
# agent1                              0.079 .
# agent2                              0.685  
# bothobjects_present_visible_fam1    0.164  
# ageday                              0.532  
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 
# Correlation of Fixed Effects:
#             (Intr) trnn_1 actn_1 lc___1 agent1 agent2 bt___1
# tranng_ysn1  0.274                                          
# actn_cnsqn1 -0.198 -0.003                                   
# lctn_bjc__1 -0.003 -0.001  0.664                            
# agent1       0.045  0.238 -0.047 -0.206                     
# agent2      -0.057  0.382  0.278  0.146 -0.429              
# bthbjct___1 -0.118  0.004  0.258  0.126  0.554 -0.245       
# ageday      -0.906 -0.049  0.053  0.028 -0.010  0.054 -0.074
# convergence code: 0
# boundary (singular) fit: see ?isSingular
# Marginal (predicted) effect plots for each fixed effect of the standardized
# outlier-excluded model. The sort.est/show.values/show.p arguments carried
# over from the earlier type = "est" call do not apply to prediction plots and
# are dropped; the axis title is corrected — the outcome is scale(look_pref),
# so predictions are in standard deviations, not seconds.
sjPlot::plot_model(goals.m1.cooks.beta,
                   type = "pred",
                   axis.title = "Predicted looking preference (standard deviations)")
# $training_yesno

# 
# $action_consequence

# 
# $location_object_goal_ambiguous

# 
# $agent

# 
# $bothobjects_present_visible_fam

# 
# $ageday

# Effect plots for the standardized outlier-excluded model
plot(allEffects(goals.m1.cooks.beta))

# Forest plot of the standardized estimates for the outlier-excluded model.
# NOTE(review): with sort.est = TRUE the axis.labels are applied to the
# SORTED coefficients, so the label order below must match the sorted
# estimate order, not the model's term order -- verify against the plot.
regressionplot2 <- sjPlot::plot_model(goals.m1.cooks.beta,
                   type = "est",
                   colors = "bw",
                   sort.est=TRUE,
                   title = "",
                   axis.labels= c("Agent (person)",
                                  "Age in days",
                                  "Sticky mittens training",
                                  "State change",
                                  "Both objects present during fam/hab",
                                  "Agent (self-propelled object)",
                                  "Unambiguous location or object goal"),
                   axis.title=c("Effect on looking (unexpected - expected) in standard deviations"),
                   show.values=TRUE,
                   show.p=TRUE)

# Unstandardized model summary (seconds scale)
summary(goals.m1.cooks)
# Linear mixed model fit by REML. t-tests use Satterthwaite's method [
# lmerModLmerTest]
# Formula: 
# look_pref ~ training_yesno + action_consequence + location_object_goal_ambiguous +  
#     agent + bothobjects_present_visible_fam + ageday + (1 | condition) +  
#     (1 | experiment) + (1 | paper)
#    Data: goals.cooks
# Control: lmerControl(optimizer = "Nelder_Mead")
# 
# REML criterion at convergence: 2744
# 
# Scaled residuals: 
#    Min     1Q Median     3Q    Max 
# -3.596 -0.483 -0.053  0.466  3.468 
# 
# Random effects:
#  Groups     Name        Variance Std.Dev.
#  condition  (Intercept)   9.63    3.1    
#  paper      (Intercept)   0.00    0.0    
#  experiment (Intercept)   0.00    0.0    
#  Residual               105.05   10.2    
# Number of obs: 367, groups:  condition, 14; paper, 6; experiment, 3
# 
# Fixed effects:
#                                  Estimate Std. Error       df t value Pr(>|t|)
# (Intercept)                        6.8148     5.0804 160.0703    1.34    0.182
# training_yesno1                    1.8617     1.7692   4.7191    1.05    0.344
# action_consequence1                2.4973     1.8530   9.4820    1.35    0.209
# location_object_goal_ambiguous1    4.8635     1.8569   9.8464    2.62    0.026
# agent1                             4.6118     2.5171  24.1382    1.83    0.079
# agent2                            -0.6402     1.5522  17.4726   -0.41    0.685
# bothobjects_present_visible_fam1   2.5601     1.7217  11.4617    1.49    0.164
# ageday                            -0.0276     0.0441 358.0149   -0.63    0.532
#                                   
# (Intercept)                       
# training_yesno1                   
# action_consequence1               
# location_object_goal_ambiguous1  *
# agent1                           .
# agent2                            
# bothobjects_present_visible_fam1  
# ageday                            
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# 
# Correlation of Fixed Effects:
#             (Intr) trnn_1 actn_1 lc___1 agent1 agent2 bt___1
# tranng_ysn1  0.274                                          
# actn_cnsqn1 -0.198 -0.003                                   
# lctn_bjc__1 -0.003 -0.001  0.664                            
# agent1       0.045  0.238 -0.047 -0.206                     
# agent2      -0.057  0.382  0.278  0.146 -0.429              
# bthbjct___1 -0.118  0.004  0.258  0.126  0.554 -0.245       
# ageday      -0.906 -0.049  0.053  0.028 -0.010  0.054 -0.074
# convergence code: 0
# boundary (singular) fit: see ?isSingular
# Effect plots for the unstandardized outlier-excluded model
plot(allEffects(goals.m1.cooks))

# Regression table for the standardized outlier-excluded model, with test statistics
sjPlot::tab_model(goals.m1.cooks.beta, show.stat = TRUE)
  scale(look pref)
Predictors Estimates CI Statistic p
(Intercept) 0.51 -0.41 – 1.43 1.08 0.280
training_yesno1 0.17 -0.15 – 0.49 1.05 0.293
action_consequence1 0.23 -0.11 – 0.57 1.35 0.178
location_object_goal_ambiguous1 0.45 0.11 – 0.79 2.62 0.009
agent1 0.43 -0.03 – 0.89 1.83 0.067
agent2 -0.06 -0.34 – 0.22 -0.41 0.680
bothobjects_present_visible_fam1 0.24 -0.08 – 0.55 1.49 0.137
ageday -0.00 -0.01 – 0.01 -0.63 0.531
Random Effects
σ2 0.90
τ00 condition 0.08
τ00 paper 0.00
τ00 experiment 0.00
N condition 14
N experiment 3
N paper 6
Observations 367
Marginal R2 / Conditional R2 0.092 / NA
# Coefficient table for the outlier-excluded model: standardized betas, raw
# estimates, and profile CIs. gen.beta()/gen.m()/gen.ci() are project helpers
# defined earlier in the file; rows 4:11 presumably index the 8 fixed effects
# -- TODO confirm against gen.ci()'s definition.
goals.interventions.cooks<- cbind(
  gen.beta(effectsize::standardize(goals.m1.cooks)),
  gen.m(goals.m1.cooks),
  gen.ci(goals.m1.cooks)[4:11,]
) 
# boundary (singular) fit: see ?isSingular
# Computing profile confidence intervals ...
# Combine the full-sample and outlier-excluded forest plots side by side
# (regressionplot1 is created earlier in the file, outside this excerpt)
(regressionplot1 | regressionplot2) + plot_annotation(tag_levels = 'A')

Generating BFs

# ML fit (REML = FALSE) of the full goals model so that nested models can be
# compared via BIC; Nelder-Mead avoids the convergence failures seen with the
# default optimizer.
goals.full <- lmer(data = goals,
     formula = look_pref ~ training_yesno  + action_consequence + location_object_goal_ambiguous + agent + bothobjects_present_visible_fam + ageday + (1|condition) + (1|experiment) + (1|paper), REML=FALSE,
     control = lmerControl(optimizer="Nelder_Mead"))
# boundary (singular) fit: see ?isSingular
goals.predictors <- c("training_yesno", "action_consequence",
                      "location_object_goal_ambiguous", "agent",
                      "bothobjects_present_visible_fam", "ageday")

# Result table: one row per fixed effect, filled in by the loop below
goals.bf.full <- data.frame(goals.predictors) %>%
  mutate(BF = NA,
         Interpretation = NA) %>%
  rename(Fixed.Effect = goals.predictors)

# BIC-approximate Bayes factor for each fixed effect: full model vs. the
# model with that single predictor dropped (BF > 1 favours keeping it)
for (predictor in goals.predictors) {
  reduced <- update(goals.full, as.formula(paste0(". ~ . -", predictor)))
  bf <- exp((BIC(reduced) - BIC(goals.full)) / 2)
  row <- match(predictor, goals.bf.full$Fixed.Effect)
  goals.bf.full$BF[row] <- bf
  goals.bf.full$Interpretation[row] <- interpret_bf(bf)
}
goals.bf.full
#                      Fixed.Effect     BF                  Interpretation
# 1                  training_yesno 0.1085       moderate evidence against
# 2              action_consequence 0.1368       moderate evidence against
# 3  location_object_goal_ambiguous 1.1930 anecdotal evidence in favour of
# 4                           agent 0.1254       moderate evidence against
# 5 bothobjects_present_visible_fam 2.0666 anecdotal evidence in favour of
# 6                          ageday 0.1069       moderate evidence against
# Repeat the Bayes-factor computation on the data with influential
# observations removed (ML fit, as above, so the BICs are comparable).
goals.full.cooks <- lmer(data = goals.cooks,
     formula = look_pref ~ training_yesno  + action_consequence + location_object_goal_ambiguous + agent + bothobjects_present_visible_fam + ageday + (1|condition) + (1|experiment) + (1|paper), REML=FALSE,
     control = lmerControl(optimizer="Nelder_Mead"))
# boundary (singular) fit: see ?isSingular
goals.predictors <- c("training_yesno", "action_consequence",
                      "location_object_goal_ambiguous", "agent",
                      "bothobjects_present_visible_fam", "ageday")

# Result table: one row per fixed effect, filled in by the loop below
goals.bf.cooks <- data.frame(goals.predictors) %>%
  mutate(BF.cooks = NA,
         Interpretation.cooks = NA) %>%
  rename(Fixed.Effect = goals.predictors)

# BIC-approximate Bayes factor for each fixed effect, as above but on the
# outlier-excluded data
for (predictor in goals.predictors) {
  reduced <- update(goals.full.cooks, as.formula(paste0(". ~ . -", predictor)))
  bf <- exp((BIC(reduced) - BIC(goals.full.cooks)) / 2)
  row <- match(predictor, goals.bf.cooks$Fixed.Effect)
  goals.bf.cooks$BF.cooks[row] <- bf
  goals.bf.cooks$Interpretation.cooks[row] <- interpret_bf(bf)
}
goals.bf.cooks
#                      Fixed.Effect BF.cooks           Interpretation.cooks
# 1                  training_yesno  0.17916      moderate evidence against
# 2              action_consequence  0.14128      moderate evidence against
# 3  location_object_goal_ambiguous  3.96554 moderate evidence in favour of
# 4                           agent  0.05074        strong evidence against
# 5 bothobjects_present_visible_fam  0.32410      moderate evidence against
# 6                          ageday  0.05838        strong evidence against
# Merge full-sample and outlier-excluded Bayes factors into a single table,
# sorted by the full-sample BF. The join key is spelled out rather than
# relying on the implicit "Joining, by = ..." message.
goals.bf <- full_join(goals.bf.full, goals.bf.cooks, by = "Fixed.Effect") %>%
    arrange(desc(BF))
knitr::kable(goals.bf)
Fixed.Effect BF Interpretation BF.cooks Interpretation.cooks
bothobjects_present_visible_fam 2.0666 anecdotal evidence in favour of 0.3241 moderate evidence against
location_object_goal_ambiguous 1.1930 anecdotal evidence in favour of 3.9655 moderate evidence in favour of
action_consequence 0.1368 moderate evidence against 0.1413 moderate evidence against
agent 0.1254 moderate evidence against 0.0507 strong evidence against
training_yesno 0.1085 moderate evidence against 0.1792 moderate evidence against
ageday 0.1069 moderate evidence against 0.0584 strong evidence against

We again computed Bayes Factors for each of the fixed effects, which express the strength of evidence for H1 (i.e. the hypothesis that the fixed effect has predictive power in the model) versus H0 (i.e. the hypothesis that the fixed effect has no predictive power in the model). The Bayes Factor associated with each fixed effect is shown in Table X. There was anecdotal evidence that seeing an unambiguous object or location goal (BF = 1.193), and seeing both objects during familiarization or habituation (BF = 2.0666), influenced infants’ responses at test. There was moderate evidence against the hypotheses that action training (BF = 0.1085), action consequence (BF = 0.1368), type of agent (BF = 0.1254), and infant age (BF = 0.1069), influenced infants’ looking preferences.

Exploratory Analysis

# Exploratory Bayesian model comparison with brms/Stan
library(rstan)
library(brms)
# NOTE(review): warmup = 100 / iter = 200 / chains = 2 yields only 200
# post-warmup draws in total -- far too few for a stable bridge-sampling
# Bayes factor; increase to several thousand iterations before interpreting
# bayes_factor() below.
# NOTE(review): `inits` and `save_all_pars` are deprecated in recent brms
# releases (replaced by `init` and `save_pars = save_pars(all = TRUE)`) --
# confirm against the installed brms version.
goals.e1 <- brm(formula = look_pref ~ training_yesno + (1+condition|paper),
                data= goals, 
                warmup = 100, 
                iter   = 200, 
                chains = 2, 
                inits  = "random",
                cores  = 2,
                save_all_pars = TRUE)
summary(goals.e1)

# Intercept-only counterpart with the same random-effect structure
goals.e2 <- brm(formula = look_pref ~ 1 + (1+condition|paper),
                data= goals, 
                warmup = 100, 
                iter   = 200, 
                chains = 2, 
                inits  = "random",
                cores  = 2,
                save_all_pars = TRUE)
summary(goals.e2)

# Bayes factor for the training model (goals.e1) over the null (goals.e2)
bayes_factor(goals.e1, goals.e2)


# BIC-approximate Bayes factor for the training effect alone.
# REML = FALSE is added: the models being compared differ in their fixed
# effects, and REML criteria are not comparable across different fixed-effect
# structures — the confirmatory analyses above already fit with ML for exactly
# this reason.
# NOTE(review): the null model also drops the random slope for training, so
# this BF mixes evidence about the fixed effect with evidence about the slope
# variance.
goals.full <- lmer(data = goals,
     formula = look_pref ~ training_yesno + (1+training_yesno|paper),
     REML = FALSE)
goals.null <- lmer(data = goals,
     formula = look_pref ~ 1 + (1|paper),
     REML = FALSE)

exp((BIC(goals.null) - BIC(goals.full))/2)

# Same BIC-approximate Bayes factor for goal ambiguity (intercept-only
# random-effect structure in both models)
goals.full <- lmer(data = goals,
     formula = look_pref ~ location_object_goal_ambiguous + (1|paper),
     REML = FALSE)
goals.null <- lmer(data = goals,
     formula = look_pref ~ 1 + (1|paper),
     REML = FALSE)
exp((BIC(goals.null) - BIC(goals.full))/2)

We note that the analysis we reported deviates from our pre-registration plan. In that plan, we included 8 fixed effects, including a fixed effect for age in days, one picking out a control condition, and 6 others. However, 2 of these fixed effects were either completely or partially redundant with other predictors in the model, which caused a rank deficiency issue. Thus, we dropped these two fixed effects from the model. We report this new model below.

When we compared the effects of different variants, we found that making the goal of the agent’s actions unambiguous (marginal effect, [-0.967,6.147], ß=0.37, B=4.781, SE=2.175, p=0.054, two-tailed), or presenting a self-propelled object (marginal effect, [0.993,7.911], ß=0.5, B=6.455, SE=2.628, p=0.023, two-tailed) or an entire person ([1.982,10.67], ß=-0.082, B=-1.053, SE=1.752, p=0.556, two-tailed), relative to seeing a hand appear from off-stage, increased infants’ looking preference for the unexpected event. We did not find any other effects, including age, motor training, or action consequences, when taking into account all of these predictors in the same model. We did not find an effect of age, [-0.17,0.03], ß=-0.068, B=-0.068, SE=0.051, p=0.182, two-tailed. These analyses included 386 total infants, 19 of whom were classified as influential based on Cook’s Distance. Excluding these influential participants in the analysis produced similar results, except that the goal ambiguity effect crossed the threshold into significance ([-0.754,4.979], ß=0.451, B=4.863, SE=1.857, p=0.026, two-tailed), and the two effects of the agent type crossed the threshold out of significance (marginal effect of object, [1.421,7.294], ß=0.428, B=4.612, SE=2.517, p=0.079, two-tailed; non-significant effect of entire person, [0.994,9.402], ß=-0.059, B=-0.64, SE=1.552, p=0.685, two-tailed).

Exploratory Analysis

Visualization

# Long format: one row per (infant, look type), stacking unexp_look/exp_look
# into look_type/look_sec, plus log-transformed looking time.
# pivot_longer() replaces the superseded gather(); note it orders the output
# rows by original row rather than by column, but all downstream uses here
# (plots) are order-independent.
ind.data.long <- ind.data %>%
  pivot_longer(cols = unexp_look:exp_look,
               names_to = "look_type",
               values_to = "look_sec") %>%
  mutate(look_log = log(look_sec))

# Scatter of log expected vs. log unexpected looking, per paper x PI group.
# Fix: in the original, the closing parenthesis ended the assignment right
# after geom_point(), so geom_smooth() and facet_wrap() were added only to the
# printed plot, not to the stored eplot1 object. The full plot is now both
# assigned and printed.
(eplot1 <- ggplot(ind.data, aes(x = log(exp_look), y = log(unexp_look))) +
    geom_point() +
    geom_smooth() +
    facet_wrap(paper ~ PI_group))
# `geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Log looking time by age with a linear fit, faceted by look type x causality.
# Assign first, then autoprint the stored object.
eplot2 <- ggplot(ind.data.long, aes(x = ageday, y = look_log)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(look_type ~ action_causal)
eplot2
# `geom_smooth()` using formula 'y ~ x'

Impute Sommerville Data

# mice provides multiple-imputation tools; attaching it masks base
# cbind/rbind (see messages below). NOTE(review): mice is not called in the
# visible code -- confirm it is actually used for the imputation below.
library(mice)
# 
# Attaching package: 'mice'
# The following objects are masked from 'package:base':
# 
#     cbind, rbind
# Fix the RNG state so the simulated looking-time draws below are reproducible
set.seed(429)

# 0_pilotnotraining
# n = 16, mean age = 106.54
# UNEXP, M = 53.9, SD = 14.9
# EXP, M = 47.1, SD = 12.4

# 1_watchfirst
# n = 15, mean age = 101.32
# UNEXP, M = 33.781, SD = 7.283212518
# EXP, M = 33.417, SD = 4.740388137
# t = 0.9

# 1_reachfirst
# n = 15, mean age = 104.32
# UNEXP, M = 53.992, SD = 11.10689909
# EXP, M = 30.504, SD = 4.369927511
# t = 2.6

# Distribution of raw looking times in the Woodward-lab data, used to choose
# plausible lognormal parameters for the simulated draws below
woodward_looks <- ind.data.long %>%
  filter(PI_group == "woodward")
ggplot(woodward_looks, aes(look_sec)) +
  geom_histogram()
# `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.

# Simulate lognormal looking times, with meanlog/sdlog hand-tuned so the
# realized mean/SD approximate the summary statistics listed above
# (for a lognormal, mean = exp(meanlog + sdlog^2/2)).
# NOTE(review): n = 16 matches the pilot group, but the watchfirst/reachfirst
# cells above report n = 15 -- confirm which cell these draws are imputing.
v1 <- rlnorm(16, meanlog=3.987, sdlog=.25)
mean(v1)
# [1] 56.2
sd(v1)
# [1] 12.19
v2 <- rlnorm(16, meanlog=3.852, sdlog=.24)
mean(v2)
# [1] 49.52
sd(v2)
# [1] 10.59
ind.data.long$look_log
#    [1]  4.09434  4.09434  3.83514  2.61007  2.48491  4.09434  2.07944  3.04452
#    [9]  4.09434  3.75420  1.97408  1.87180  1.93152  4.09434  1.85630  1.87180
#   [17]  3.70377  1.66771  2.70136  1.62924  2.37024  3.90399  4.09434  1.91692
#   [25]  4.09434  2.32239  3.21888  3.59731  4.09434  3.97968  3.96840  4.09434
#   [33]  2.07944  2.25129  3.40120  4.09434  2.17853  2.17475  1.53328  1.43508
#   [41]  2.41293  2.76001  1.88201  0.80350  2.93916  1.33500  1.71680  2.72785
#   [49]  3.56199  1.72870  2.84685  2.80739  1.97870  1.81916  2.79321  2.42775
#   [57]  1.58924  2.83321  2.18979  2.61496  1.66140  3.00072  1.98330  2.78295
#   [65]  2.15948  2.38570  1.69255  2.61740  2.20460  2.58274  2.45959  1.51146
#   [73]  2.06686  1.45083  3.09104  2.45959  1.50408  0.78846  2.49045  2.01934
#   [81]  2.31911  1.86666  2.69688  2.75154  2.09597  2.29590  1.41908  2.71469
#   [89]  2.47092  3.39563  2.60022  1.79730  2.57769  2.92316  3.38665  2.37024
#   [97]  2.68102  2.22282  2.46810  2.34820  2.89037  1.95066  1.58241  2.96011
#  [105]  2.01934  2.43944  2.41293  2.50416  2.71469  1.95066  1.92181  2.94619
#  [113]  2.81141  1.78059  1.91692  2.53634  2.42480  2.07944  2.94619  1.85108
#  [121]  1.84055  3.25296  2.48213  2.23359  2.74084  3.68721  1.92668  2.67644
#  [129]  2.52038  3.12530  1.74630  3.80666  1.37793  2.10413  2.39182  2.13219
#  [137]  1.86149  1.86149  2.27556  2.36399  1.98330  2.79117  2.30923  3.25681
#  [145]  2.40394  2.53634  2.66723  3.26066  1.10526  2.95230  2.13061  1.67896
#  [153]  3.87950  1.89160  1.84845  1.62334  2.50634  2.88088  1.10526  1.95869
#  [161]  4.15888  2.36556  0.24686  1.48387  1.46326  2.29152  1.11186  0.81093
#  [169]  3.38642  2.50553  2.72524  1.66203  2.28442  2.29657  2.18717  3.07915
#  [177]  1.37877  3.67453  0.00995  2.05924  2.46895  0.27763  1.77834  0.31481
#  [185]  0.94779  1.75786  1.42311  1.10194  0.72755  2.67622  0.46373  0.58222
#  [193]  1.71740  1.93442  2.34469  0.47623  1.21788  2.58173  2.88480  2.26592
#  [201]  1.10856  1.84055  3.34604  1.34286  2.15640  0.27003  2.22029  1.34025
#  [209]  3.21366  1.02962  1.14103  1.07841  1.89912  1.64094  0.98208  1.83098
#  [217] -0.54473  2.88256  2.16447  0.57098  1.67896  1.44456  2.92370  0.76547
#  [225]  1.89912  2.82435  1.50408  2.07317  2.52892  0.81093  0.66783  2.67000
#  [233]  2.35613  2.29253  2.32239  0.80200  3.52988  1.21788  0.13103  3.44967
#  [241]  2.61958  2.80880 -0.21072  2.06686  0.37156  1.42311  0.87547  1.04028
#  [249]  1.23547  3.37485  1.17248  1.69928  1.08519  1.06471  1.15688  2.27829
#  [257]  0.59333  2.37584  1.18173  2.25759  0.27763  1.57277  2.84781  2.79911
#  [265]  0.68813  3.03158  1.55814  1.64866  2.26280  1.47247  3.56303  1.92425
#  [273]  2.52413  1.04732  3.46261  2.96218  0.28518  1.65632  2.51608  1.04028
#  [281]  1.29746 -1.71480  0.60432  0.46373  1.40364  1.00796  1.82616  1.45161
#  [289]  0.81093  1.66203  0.45108  0.80648  1.26976 -0.07257  0.17395  2.64901
#  [297]  2.43274  2.06939  2.55101  2.23431  2.04640  2.64191  1.32708  1.55181
#  [305]  2.02155  3.22486  2.59898  1.84372  2.63763  2.96733  2.22462  3.13549
#  [313]  2.50960  4.05178  2.00821  3.52342  2.51770  3.48124  1.87947  3.81110
#  [321]  4.09434  4.09434  4.09434  3.86912  3.56247  2.49321  1.84845  3.77161
#  [329]  2.36556  3.66612  3.52636  2.55723  3.84588  3.63759  4.09434  3.18013
#  [337]  3.49043  2.83027  4.09434  2.44669  4.08933  3.57375  3.93574  3.14415
#  [345]  3.82428  3.31237  2.72130  2.58400  3.14845  1.88707  3.74123  2.00821
#  [353]  2.28238  3.16548  2.19165  3.50255  3.61631  2.14007  2.51366  3.73886
#  [361]  3.26576  4.09434  3.55392  2.45959  3.89488  2.64971  3.37759  2.04122
#  [369]  3.69759  3.14845  3.01798  3.62700  2.03471  4.09434  3.26767  2.36085
#  [377]  4.09434  2.80033  3.01308  1.83258  3.01798  1.93874  2.94707  4.09434
#  [385]  3.39451  1.71380  3.62301  3.10459  4.09434  2.07317  2.91235  3.92494
#  [393]  4.09434  4.09434  2.32728  2.05412  1.88707  4.09434  3.29213  2.72130
#  [401]  3.63890  3.30505  2.03471  1.98100  2.12823  2.07317  3.33932  3.53076
#  [409]  2.73112  2.47233  2.69463  2.07317  2.52573  3.70868  2.49321  3.62966
#  [417]  3.41280  4.09434  2.19722  3.27336  2.38876  3.01553  2.32728  3.07269
#  [425]  2.87074  3.68261  2.54160  2.68444  3.46261  2.02155  2.61983  3.52930
#  [433]  2.77259  3.25424  3.29089  2.23359  2.75790  3.02367  1.92181  2.98400
#  [441]  2.42185  2.62225  1.63576  1.99697  3.07731  1.60944  2.55205  2.55723
#  [449]  2.85071  2.55205  2.09186  3.02868  1.48915  1.61608  1.16315  2.07527
#  [457]  2.37024  2.53370  1.72277  1.82455  1.65505  2.81541  1.92181  3.45210
#  [465]  1.71079  1.82991  1.58924  1.49664  0.79901  2.02375  2.41323  1.30833
#  [473]  1.96009  2.72785  2.34469  1.93152  1.93586  1.96431  2.82731  2.92692
#  [481]  3.56303  2.23001  3.18635  2.81361  2.40695  2.93013  2.48740  3.44362
#  [489]  3.04118  3.10772  1.36864  1.13140  1.88099  1.80665  2.19054  2.31451
#  [497]  2.18605  3.62140  2.71866  1.91692  2.52011  2.36085  2.78501  0.74669
#  [505]  1.89762  1.17865  2.88648  2.49403  2.18717  2.94128  2.38876  3.27071
#  [513]  2.83849  1.75613  2.52892  2.01357  2.81061  2.06433  2.29556  1.31641
#  [521]  2.86619  2.23001  2.41413  1.43270  1.80665  2.93492  1.14422  1.97130
#  [529]  2.08194  1.46557  2.95282  1.91398  1.66203  2.50553  2.05668  3.55678
#  [537]  2.82019  2.78994  3.11574  2.54788  1.90658  3.29138  3.21808  3.41477
#  [545]  3.06386  3.04547  3.12720  1.76815  2.74663  1.92425  2.61667  1.79009
#  [553]  3.01847  2.93066  2.53845  3.68938  2.83908  3.20599  1.77495  2.87187
#  [561]  3.12808  2.47654  2.02771  3.43086  2.06348  1.68516  2.17324  2.25863
#  [569]  1.46711  0.95166  2.92692  1.72988  1.74572  2.11304  2.23645  2.58072
#  [577]  2.19722  1.94781  2.27316  1.88403  2.22498  2.69553  2.80481  2.36800
#  [585]  3.81154  2.53211  2.72116  2.41770  3.55040  3.76983  3.26500  2.11850
#  [593]  3.03697  3.42922  3.05742  2.76136  3.16730  2.32461  3.39397  3.36345
#  [601]  2.91510  2.27809  1.26882  2.92665  1.27397  2.31451  1.85003  2.82217
#  [609]  1.29381  0.54426  2.27863  2.31320  2.09924  2.08443  1.70958  3.12559
#  [617]  2.02815  2.31022  2.44206  2.51850  4.17053  3.52998  2.25024  2.01890
#  [625]  1.81970  3.14300  2.89000  2.96870  2.14943  1.81970  3.53154  3.77169
#  [633]  2.53078  1.52606  3.23330  0.88652  2.65582  2.67805  0.18786  2.78542
#  [641]  1.43111  3.08267  2.12386  1.70475  1.86149  1.70838  1.85265  2.89646
#  [649]  2.10128  1.09639  2.57261  2.91235  2.71469  1.96009  1.75786  2.19722
#  [657]  2.93916  1.79176  3.43399  3.75887  3.16125  1.74047  3.77276  3.53223
#  [665]  1.74047  2.12823  4.09434  2.31254  2.11626  2.40695  3.69635  4.09434
#  [673]  3.45632  4.09434  2.40695  1.68640  2.65324  3.16969  2.78501  3.21084
#  [681]  3.00568  3.57235  3.00568  3.28466  3.70623  3.83298  2.02375  1.58241
#  [689]  1.82991  1.41908  2.13614  2.55723  2.01490  0.84730  3.05085  1.51146
#  [697]  1.82455  1.88201  3.48021  1.32619  2.23001  2.14788  1.55463  1.54756
#  [705]  3.05085  2.64617  2.49321  2.91597  2.19351  2.91054  2.30259  3.44255
#  [713]  2.66954  2.55464  2.24425  2.32239  1.39459  2.31911  2.62225  2.43944
#  [721]  1.93634  1.84583  2.67874  2.09186  3.05243  2.31583  1.85630  0.83291
#  [729]  2.10006  2.66954  2.00148  2.91597  2.93030  3.09407  2.41293  3.18635
#  [737]  1.48160  2.31254  2.12823  3.17944  2.68558  2.22642  2.81541  2.98736
#  [745]  3.30199  2.26176  2.48768  1.58241  2.01490  1.85108  3.02529  1.84583
#  [753]  1.60275  2.44813  2.89775  1.98787  1.93152  2.06263  1.70475  1.96009
#  [761]  1.62924  2.33860  1.75209  1.05315  2.23359  2.49596  2.21193  1.82991
#  [769]  2.45674  2.02375  2.27556  3.03013  1.82455  1.85630  2.29590  3.56859
#  [777]  2.20460  2.96527  2.96527  3.34404  1.84055  3.80666  1.70475  2.52038
#  [785]  2.20092  1.92668  2.79117  1.85108  2.04122  2.87168  1.68021  2.78912
#  [793]  1.88707  1.79730  2.34820  2.69688  2.27213  3.47507  2.20827  2.27419
#  [801]  1.70475  0.71295  3.95508  2.22678  0.76547  2.72785  2.29958  1.49290
#  [809]  0.62058  2.33311  3.25887  1.13783 -0.10536  2.21485  0.23111  1.18784
#  [817]  0.58222  1.76473  3.70229  1.31909  2.15987  1.26130  1.71919  2.72720
#  [825]  1.46557  2.73372  1.36354  2.86334  2.37398  1.89462  2.51366  1.54543
#  [833]  3.50856  1.65632  0.16551  1.16315  2.10535  0.85015  1.19392  1.59534
#  [841]  1.20896  0.89609  1.86563  1.21491  3.52282  0.54812  2.52413  3.18013
#  [849]  3.47816  1.51072  3.00271  0.27763  2.63189  2.27624  2.23216  0.85866
#  [857]  1.92862  1.09527  3.47352  0.74194  0.28518  2.01490  1.32972  1.43270
#  [865]  0.53063  1.35325  0.69813  3.56841  2.49897  1.66013  1.03318  1.77326
#  [873]  1.49962  1.38379  0.82855  1.77326  1.30019  1.87026  1.69010  1.22964
#  [881]  0.67803  2.20607  1.55181  1.79675  1.13462  1.08519  1.48160  2.58551
#  [889]  1.78842  1.33763  2.43973  2.84433  1.92716  2.62177 -0.16252  1.38128
#  [897]  1.50185  0.06766  1.71740  2.97757  2.06306  1.47933  1.15688  1.47705
#  [905]  0.42527  2.38968  0.80648  2.31550  0.90016  2.28442 -0.71335  1.99334
#  [913]  3.96138  2.50471  0.54812  3.68362  2.17475  0.38526  2.61301  2.70738
#  [921]  3.34145  2.68034  2.37862  1.90954  1.81156  1.08181  1.87026  1.34547
#  [929]  2.26903  1.50185  1.47705  0.77932  0.68310  0.86710  1.34547  0.84157
#  [937]  0.84157  1.08181  2.00013  1.07500  0.19062  1.09192  1.64481  0.24686
#  [945]  1.87487  2.10657  2.43973  1.18173  0.87547  2.50062  1.75613  2.66792
#  [953]  2.21703  1.81319  2.55490  2.21485  1.58104  1.67147  2.88200  3.14415
#  [961]  2.68102  2.55723  1.93152  3.60821  1.98787  3.18221  3.03975  3.24454
#  [969]  2.60639  3.81881  3.71844  3.71844  3.92593  3.54385  3.71479  2.17475
#  [977]  2.48491  4.00915  2.95751  2.59152  3.56105  3.33220  2.09186  3.51898
#  [985]  3.66868  3.70991  3.94255  3.89589  3.80999  2.30259  3.48124  3.70746
#  [993]  4.09434  2.64971  4.09434  2.72785  2.10413  2.19722  3.04927  2.10413
# [1001]  4.09434  3.31600  3.50556  2.64971  2.51366  2.11626  3.46886  3.52489
# [1009]  2.46810  4.09434  3.68888  3.99636  3.20275  2.35138  3.72930  2.13417
# [1017]  3.73050  2.23538  4.09434  2.94969  3.34639  1.77495  2.74727  3.83190
# [1025]  3.61765  3.15274  4.02535  2.76317  2.63189  1.89462  2.46385  3.85227
# [1033]  2.28747  3.66612  1.88707  2.27213  3.73886  1.98787  4.08177  1.61939
# [1041]  3.16336  4.09434  3.79773  3.35341  3.00568  1.69562  1.95303  3.81331
# [1049]  3.56953  2.96527  2.44669  3.07501  1.82455  2.16332  2.51366  1.88707
# [1057]  3.48584  3.61765  3.29953  2.51366  1.93152  3.50104  2.85071  2.52172
# [1065]  3.55392  3.51898  4.09434  4.06217  1.93152  2.48491  2.38876  2.39790
# [1073]  2.11626  1.88707  2.74084  3.73767  3.49347  3.52194  3.45632  2.56110
# [1081]  2.36085  3.57888  2.14398  3.20815  2.79117  2.27556  2.59276  2.44524
# [1089]  2.89407  2.43653  2.15176  2.86600  1.71079  1.12059  2.05412  1.30833
# [1097]  2.51500  1.84055  2.33537  2.48213  3.09104  3.37577  1.97408  1.98787
# [1105]  1.59601  2.24425  2.34501  2.45959  1.45083  1.54756  2.34181  3.34521
# [1113]  1.76359  2.60269  1.62268  1.90211  1.75786  2.02375  0.58222  1.76359
# [1121]  2.15987  1.66771  2.90142  2.91777  2.83498  1.84530  2.22678  2.18942
# [1129]  2.08318  2.48491  2.61227  2.26903  3.28952  3.44585  3.55048  3.06945
# [1137]  2.05796  3.58269  3.16548  2.29958  2.55878  1.41099  1.89010  2.52892
# [1145]  3.23632  1.17248  2.00013  3.80666  2.02815  2.57794  2.95282  3.05494
# [1153]  2.39060  1.37372  1.61741  1.53039  2.18155  2.29455  2.20717  3.62354
# [1161]  1.90211  2.24284  3.32468  1.15057  2.45359  2.09433  2.73631  1.79840
# [1169]  0.59884  1.70293  2.40964  2.16217  2.11021  0.80648  2.44408  2.11986
# [1177]  1.09192  2.02022  1.83098  0.59884  2.16905  1.87334  0.87129  1.73871
# [1185]  1.22671  3.13506  2.17361  2.14242  1.84688  2.57870  2.26072  2.18493
# [1193]  3.37588  3.01504  2.95751  3.05918  3.18013  2.14593  2.96269  2.18942
# [1201]  2.80397  2.80457  2.57185  2.40243  2.47064  3.54789  2.72981  2.89536
# [1209]  2.45187  2.04511  3.03110  2.14943  2.47373  2.90781  1.97778  1.51659
# [1217]  2.37893  1.44378  1.13462  1.59195  2.45473  1.71019  1.69684  2.04769
# [1225]  2.36993  2.07401  1.49365  1.72988  2.58626  1.72752  2.18305  2.17399
# [1233]  2.51667  2.57703  3.81229  2.33563  1.73360  2.29707  3.79664  3.36521
# [1241]  2.28388  2.75633  2.40550  3.46555  3.28003  2.76992  3.13431  3.06327
# [1249]  3.69187  3.48380  2.29650  2.68317  0.34831  1.37624  1.59534  1.63803
# [1257]  2.33052  1.93970  0.89336  0.45108  2.15987  2.55593  1.06241  2.21739
# [1265]  1.24030  1.75613  2.02859  2.37645  2.37893  1.40118  3.83161  2.44986
# [1273]  2.01089  2.90563  1.37287  3.18910  2.78891  1.64352  2.31353  1.84530
# [1281]  2.59326  2.73263  2.94602  1.37456  1.37456  1.00796  1.36524  1.71979
# [1289]  1.08631  1.92716  1.55604  2.82257  1.57277  1.50962  1.75152  1.14103
# [1297]  2.84219  1.61010  1.13676  1.24607
t.test(v1,v2, paired = TRUE)
# 
#   Paired t-test
# 
# data:  v1 and v2
# t = 1.6, df = 15, p-value = 0.1
# alternative hypothesis: true difference in means is not equal to 0
# 95 percent confidence interval:
#  -1.997 15.370
# sample estimates:
# mean of the differences 
#                   6.686